Diffstat (limited to 'drivers/net')
1187 files changed, 75004 insertions, 29974 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 10506a4b66ef..b2a4f998c180 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -81,7 +81,6 @@ config WIREGUARD select CRYPTO select CRYPTO_LIB_CURVE25519 select CRYPTO_LIB_CHACHA20POLY1305 - select CRYPTO_LIB_BLAKE2S select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT select CRYPTO_POLY1305_X86_64 if X86 && 64BIT select CRYPTO_BLAKE2S_X86 if X86 && 64BIT @@ -567,9 +566,7 @@ config XEN_NETDEV_BACKEND config VMXNET3 tristate "VMware VMXNET3 ethernet driver" depends on PCI && INET - depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \ - IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \ - PPC_64K_PAGES) + depends on PAGE_SIZE_LESS_THAN_64KB help This driver supports VMware's vmxnet3 virtual ethernet NIC. To compile this driver as a module, choose M here: the diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 47a04c330885..f1a36d7e2151 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -11,6 +11,7 @@ #include <linux/net.h> #include <linux/igmp.h> #include <linux/workqueue.h> +#include <net/sch_generic.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/udp.h> @@ -1106,7 +1107,7 @@ static bool amt_send_membership_query(struct amt_dev *amt, rt = ip_route_output_key(amt->net, &fl4); if (IS_ERR(rt)) { netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4); - return -1; + return true; } amtmq = skb_push(skb, sizeof(*amtmq)); @@ -3286,7 +3287,7 @@ static void __exit amt_fini(void) { rtnl_link_unregister(&amt_link_ops); unregister_netdevice_notifier(&amt_notifier_block); - cancel_delayed_work(&source_gc_wq); + cancel_delayed_work_sync(&source_gc_wq); __amt_source_gc_work(); destroy_workqueue(amt_wq); } diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 5566daefbff4..d558535390f9 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -23,6 +23,7 @@ * of the GNU General Public License, incorporated herein by reference. 
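The two amt.c hunks above follow general kernel idioms worth spelling out. A minimal sketch, assuming standard workqueue and boolean-conversion semantics; the demo_* names are illustrative and not part of the patch:

    #include <linux/workqueue.h>

    static struct delayed_work demo_gc_work;

    /* amt_send_membership_query() is declared bool; "return -1" compiles
     * but is really "return true" after integer-to-bool conversion, so
     * the fix writes the literal the caller actually tests. */
    static bool demo_send_failed(int err)
    {
            if (err)
                    return true;    /* failure */
            return false;
    }

    static void demo_module_exit(void)
    {
            /* cancel_delayed_work() may return while the handler is still
             * running; the _sync variant waits for it, making the later
             * teardown of the workqueue and GC state safe. */
            cancel_delayed_work_sync(&demo_gc_work);
    }
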
*/ +#include <linux/compat.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index edffc3489a12..ba587e5fc24f 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -38,6 +38,13 @@ struct bareudp_net { struct list_head bareudp_list; }; +struct bareudp_conf { + __be16 ethertype; + __be16 port; + u16 sport_min; + bool multi_proto_mode; +}; + /* Pseudo network device */ struct bareudp_dev { struct net *net; /* netns for packet i/o */ @@ -602,7 +609,8 @@ static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn, } static int bareudp_configure(struct net *net, struct net_device *dev, - struct bareudp_conf *conf) + struct bareudp_conf *conf, + struct netlink_ext_ack *extack) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); struct bareudp_dev *t, *bareudp = netdev_priv(dev); @@ -611,13 +619,17 @@ static int bareudp_configure(struct net *net, struct net_device *dev, bareudp->net = net; bareudp->dev = dev; t = bareudp_find_dev(bn, conf); - if (t) + if (t) { + NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists"); return -EBUSY; + } if (conf->multi_proto_mode && (conf->ethertype != htons(ETH_P_MPLS_UC) && - conf->ethertype != htons(ETH_P_IP))) + conf->ethertype != htons(ETH_P_IP))) { + NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)"); return -EINVAL; + } bareudp->port = conf->port; bareudp->ethertype = conf->ethertype; @@ -664,7 +676,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev, if (err) return err; - err = bareudp_configure(net, dev, &conf); + err = bareudp_configure(net, dev, &conf, extack); if (err) return err; @@ -721,40 +733,6 @@ static struct rtnl_link_ops bareudp_link_ops __read_mostly = { .fill_info = bareudp_fill_info, }; -struct net_device *bareudp_dev_create(struct net *net, const char *name, - u8 name_assign_type, - struct bareudp_conf *conf) -{ - struct nlattr *tb[IFLA_MAX + 1]; - struct net_device *dev; - int err; - - memset(tb, 0, sizeof(tb)); - dev = rtnl_create_link(net, name, name_assign_type, - &bareudp_link_ops, tb, NULL); - if (IS_ERR(dev)) - return dev; - - err = bareudp_configure(net, dev, conf); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } - err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN); - if (err) - goto err; - - err = rtnl_configure_link(dev, NULL); - if (err < 0) - goto err; - - return dev; -err: - bareudp_dellink(dev, NULL); - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(bareudp_dev_create); - static __net_init int bareudp_init_net(struct net *net) { struct bareudp_net *bn = net_generic(net, bareudp_net_id); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 2ec8e015c7b3..533e476988f2 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work) struct slave *slave; if (!bond_has_slaves(bond)) { - bond_info->tx_rebalance_counter = 0; + atomic_set(&bond_info->tx_rebalance_counter, 0); bond_info->lp_counter = 0; goto re_arm; } rcu_read_lock(); - bond_info->tx_rebalance_counter++; + atomic_inc(&bond_info->tx_rebalance_counter); bond_info->lp_counter++; /* send learning packets */ @@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work) } /* rebalance tx traffic */ - if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) { + if 
(atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) { bond_for_each_slave_rcu(bond, slave, iter) { tlb_clear_slave(bond, slave, 1); if (slave == rcu_access_pointer(bond->curr_active_slave)) { @@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work) bond_info->unbalanced_load = 0; } } - bond_info->tx_rebalance_counter = 0; + atomic_set(&bond_info->tx_rebalance_counter, 0); } if (bond_info->rlb_enabled) { @@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) tlb_init_slave(slave); /* order a rebalance ASAP */ - bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; + atomic_set(&bond->alb_info.tx_rebalance_counter, + BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) bond->alb_info.rlb_rebalance = 1; @@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char rlb_clear_slave(bond, slave); } else if (link == BOND_LINK_UP) { /* order a rebalance ASAP */ - bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS; + atomic_set(&bond_info->tx_rebalance_counter, + BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) { bond->alb_info.rlb_rebalance = 1; /* If the updelay module parameter is smaller than the diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ff8da720a33a..07fc603c2fa7 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -35,6 +35,7 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> +#include <linux/filter.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> @@ -71,6 +72,7 @@ #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/if_bonding.h> +#include <linux/phy.h> #include <linux/jiffies.h> #include <linux/preempt.h> #include <net/route.h> @@ -1096,9 +1098,6 @@ static bool bond_should_notify_peers(struct bonding *bond) slave = rcu_dereference(bond->curr_active_slave); rcu_read_unlock(); - netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", - slave ? slave->dev->name : "NULL"); - if (!slave || !bond->send_peer_notif || bond->send_peer_notif % max(1, bond->params.peer_notif_delay) != 0 || @@ -1106,6 +1105,9 @@ static bool bond_should_notify_peers(struct bonding *bond) test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; + netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", + slave ? 
slave->dev->name : "NULL"); + return true; } @@ -1460,7 +1462,7 @@ done: bond_dev->hw_enc_features |= xfrm_features; #endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->mpls_features = mpls_features; - bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_segs(bond_dev, gso_max_segs); netif_set_gso_max_size(bond_dev, gso_max_size); bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; @@ -3129,8 +3131,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ - if (!bond_time_in_interval(bond, trans_start, 2) || - !bond_time_in_interval(bond, slave->last_rx, 2)) { + if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) || + !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { bond_propose_link_state(slave, BOND_LINK_DOWN); slave_state_changed = 1; @@ -3224,7 +3226,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) /* Backup slave is down if: * - No current_arp_slave AND - * - more than 3*delta since last receive AND + * - more than (missed_max+1)*delta since last receive AND * - the bond has an IP address * * Note: a non-null current_arp_slave indicates @@ -3236,20 +3238,20 @@ static int bond_ab_arp_inspect(struct bonding *bond) */ if (!bond_is_active_slave(slave) && !rcu_access_pointer(bond->current_arp_slave) && - !bond_time_in_interval(bond, last_rx, 3)) { + !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) { bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; } /* Active slave is down if: - * - more than 2*delta since transmitting OR - * - (more than 2*delta since receive AND + * - more than missed_max*delta since transmitting OR + * - (more than missed_max*delta since receive AND * the bond has an IP address) */ trans_start = dev_trans_start(slave->dev); if (bond_is_active_slave(slave) && - (!bond_time_in_interval(bond, trans_start, 2) || - !bond_time_in_interval(bond, last_rx, 2))) { + (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) || + !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) { bond_propose_link_state(slave, BOND_LINK_DOWN); commit++; } @@ -4091,7 +4093,11 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm { struct bonding *bond = netdev_priv(bond_dev); struct mii_ioctl_data *mii = NULL; - int res; + const struct net_device_ops *ops; + struct net_device *real_dev; + struct hwtstamp_config cfg; + struct ifreq ifrr; + int res = 0; netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd); @@ -4117,7 +4123,42 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm mii->val_out = BMSR_LSTATUS; } - return 0; + break; + case SIOCSHWTSTAMP: + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX)) + return -EOPNOTSUPP; + + fallthrough; + case SIOCGHWTSTAMP: + rcu_read_lock(); + real_dev = bond_option_active_slave_get_rcu(bond); + rcu_read_unlock(); + if (!real_dev) + return -EOPNOTSUPP; + + strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); + ifrr.ifr_ifru = ifr->ifr_ifru; + + ops = real_dev->netdev_ops; + if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) { + res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); + if (res) + return res; + + ifr->ifr_ifru = ifrr.ifr_ifru; + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + /* Set the BOND_PHC_INDEX flag to notify user space */ + cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX; + + return 
copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? + -EFAULT : 0; + } + fallthrough; default: res = -EOPNOTSUPP; } @@ -5319,10 +5360,40 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, BOND_ABI_VERSION); } +static int bond_ethtool_get_ts_info(struct net_device *bond_dev, + struct ethtool_ts_info *info) +{ + struct bonding *bond = netdev_priv(bond_dev); + const struct ethtool_ops *ops; + struct net_device *real_dev; + struct phy_device *phydev; + + rcu_read_lock(); + real_dev = bond_option_active_slave_get_rcu(bond); + rcu_read_unlock(); + if (real_dev) { + ops = real_dev->ethtool_ops; + phydev = real_dev->phydev; + + if (phy_has_tsinfo(phydev)) { + return phy_ts_info(phydev, info); + } else if (ops->get_ts_info) { + return ops->get_ts_info(real_dev, info); + } + } + + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; +} + static const struct ethtool_ops bond_ethtool_ops = { .get_drvinfo = bond_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = bond_ethtool_get_link_ksettings, + .get_ts_info = bond_ethtool_get_ts_info, }; static const struct net_device_ops bond_netdev_ops = { @@ -5822,6 +5893,7 @@ static int bond_check_params(struct bond_params *params) params->arp_interval = arp_interval; params->arp_validate = arp_validate_value; params->arp_all_targets = arp_all_targets_value; + params->missed_max = 2; params->updelay = updelay; params->downdelay = downdelay; params->peer_notif_delay = 0; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 5d54e11d18fa..1007bf6d385d 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -110,6 +110,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { .len = ETH_ALEN }, [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 }, [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 }, + [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 }, }; static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { @@ -453,6 +454,15 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], return err; } + if (data[IFLA_BOND_MISSED_MAX]) { + int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]); + + bond_opt_initval(&newval, missed_max); + err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval); + if (err) + return err; + } + return 0; } @@ -515,6 +525,7 @@ static size_t bond_get_size(const struct net_device *bond_dev) nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */ nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */ + nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */ 0; } @@ -650,6 +661,10 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.tlb_dynamic_lb)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_BOND_MISSED_MAX, + bond->params.missed_max)) + goto nla_put_failure; + if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct ad_info info; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index a8fde3bc458f..2e8484a91a0e 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -78,6 +78,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_ad_user_port_key_set(struct bonding *bond, const struct bond_opt_value *newval); +static int bond_option_missed_max_set(struct bonding *bond, + const struct 
bond_opt_value *newval); static const struct bond_opt_value bond_mode_tbl[] = { @@ -213,6 +215,13 @@ static const struct bond_opt_value bond_ad_user_port_key_tbl[] = { { NULL, -1, 0}, }; +static const struct bond_opt_value bond_missed_max_tbl[] = { + { "minval", 1, BOND_VALFLAG_MIN}, + { "maxval", 255, BOND_VALFLAG_MAX}, + { "default", 2, BOND_VALFLAG_DEFAULT}, + { NULL, -1, 0}, +}; + static const struct bond_option bond_opts[BOND_OPT_LAST] = { [BOND_OPT_MODE] = { .id = BOND_OPT_MODE, @@ -270,6 +279,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .values = bond_intmax_tbl, .set = bond_option_arp_interval_set }, + [BOND_OPT_MISSED_MAX] = { + .id = BOND_OPT_MISSED_MAX, + .name = "arp_missed_max", + .desc = "Maximum number of missed ARP interval", + .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) | + BIT(BOND_MODE_ALB), + .values = bond_missed_max_tbl, + .set = bond_option_missed_max_set + }, [BOND_OPT_ARP_TARGETS] = { .id = BOND_OPT_ARP_TARGETS, .name = "arp_ip_target", @@ -1186,6 +1204,16 @@ static int bond_option_arp_all_targets_set(struct bonding *bond, return 0; } +static int bond_option_missed_max_set(struct bonding *bond, + const struct bond_opt_value *newval) +{ + netdev_dbg(bond->dev, "Setting missed max to %s (%llu)\n", + newval->string, newval->value); + bond->params.missed_max = newval->value; + + return 0; +} + static int bond_option_primary_set(struct bonding *bond, const struct bond_opt_value *newval) { @@ -1526,7 +1554,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond, mac = (u8 *)&newval->value; } - if (!is_valid_ether_addr(mac)) + if (is_multicast_ether_addr(mac)) goto err; netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac); diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index f3e3bfd72556..2ec11af5f0cc 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -115,6 +115,8 @@ static void bond_info_show_master(struct seq_file *seq) seq_printf(seq, "ARP Polling Interval (ms): %d\n", bond->params.arp_interval); + seq_printf(seq, "ARP Missed Max: %u\n", + bond->params.missed_max); seq_printf(seq, "ARP IP target/s (n.n.n.n form):"); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index c48b77167fab..9b5a5df23d21 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -303,6 +303,18 @@ static ssize_t bonding_show_arp_targets(struct device *d, static DEVICE_ATTR(arp_ip_target, 0644, bonding_show_arp_targets, bonding_sysfs_store_option); +/* Show the arp missed max. */ +static ssize_t bonding_show_missed_max(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%u\n", bond->params.missed_max); +} +static DEVICE_ATTR(arp_missed_max, 0644, + bonding_show_missed_max, bonding_sysfs_store_option); + /* Show the up and down delays. 
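The new arp_missed_max option above bounds how many ARP polling intervals may pass before a slave is declared down (range 1..255, default 2, per bond_missed_max_tbl). A rough, illustrative sketch of the timing test, assuming jiffies bookkeeping; the demo_* name and the simplified window check are not from the patch:

    #include <linux/jiffies.h>

    static bool demo_slave_timed_out(unsigned long last_rx_jiffies,
                                     int arp_interval_ms, u8 missed_max)
    {
            unsigned long window = msecs_to_jiffies(arp_interval_ms) * missed_max;

            /* roughly what !bond_time_in_interval(bond, last_rx, missed_max)
             * evaluates to in bond_ab_arp_inspect() above */
            return time_after(jiffies, last_rx_jiffies + window);
    }
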
*/ static ssize_t bonding_show_downdelay(struct device *d, struct device_attribute *attr, @@ -779,6 +791,7 @@ static struct attribute *per_bond_attrs[] = { &dev_attr_ad_actor_sys_prio.attr, &dev_attr_ad_actor_system.attr, &dev_attr_ad_user_port_key.attr, + &dev_attr_arp_missed_max.attr, NULL, }; diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index a2b4463d8480..1e660afcb61b 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -16,7 +16,7 @@ obj-y += softing/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_C_CAN) += c_can/ -obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o +obj-$(CONFIG_CAN_FLEXCAN) += flexcan/ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 3aea32c9b108..a00655ccda02 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -448,7 +448,6 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state) static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct can_frame *cf = (struct can_frame *)skb->data; unsigned int mb, prio; u32 reg_mid, reg_mcr; @@ -480,8 +479,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) /* This triggers transmission */ at91_write(priv, AT91_MCR(mb), reg_mcr); - stats->tx_bytes += cf->len; - /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0); @@ -553,8 +550,6 @@ static void at91_rx_overflow_err(struct net_device *dev) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_receive_skb(skb); } @@ -619,7 +614,9 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) at91_read_mb(dev, mb, cf); stats->rx_packets++; - stats->rx_bytes += cf->len; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; + netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); @@ -779,8 +776,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) at91_poll_err_frame(dev, cf, reg_sr); - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -854,7 +849,10 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) if (likely(reg_msr & AT91_MSR_MRDY && ~reg_msr & AT91_MSR_MABT)) { /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! 
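The at91 hunks above, and the c_can, cc770, grcan and rx-offload ones further down, converge on one statistics convention: error frames are not counted as received traffic at all, and RTR frames bump rx_packets but not rx_bytes, since they carry a DLC but no payload. A condensed sketch; demo_count_rx() is an illustrative name:

    #include <linux/can.h>
    #include <linux/netdevice.h>

    static void demo_count_rx(struct net_device *dev, const struct can_frame *cf)
    {
            struct net_device_stats *stats = &dev->stats;

            if (cf->can_id & CAN_ERR_FLAG)
                    return;         /* error frames: no rx_packets, no rx_bytes */

            stats->rx_packets++;
            if (!(cf->can_id & CAN_RTR_FLAG))
                    stats->rx_bytes += cf->len;     /* RTR: DLC but no data */
    }
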
*/ - can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL); + dev->stats.tx_bytes += + can_get_echo_skb(dev, + mb - get_mb_tx_first(priv), + NULL); dev->stats.tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); } @@ -1037,8 +1035,6 @@ static void at91_irq_err(struct net_device *dev) at91_irq_err_state(dev, cf, new_state); - dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->len; netif_rx(skb); priv->can.state = new_state; diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 08b6efa7a1a7..bd2f6dc01194 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -211,7 +211,6 @@ struct c_can_priv { struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */ void (*raminit)(const struct c_can_priv *priv, bool enable); u32 comm_rcv_high; - u32 dlc[]; }; struct net_device *alloc_c_can_dev(int msg_obj_num); diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c index 377c7d2e7612..6655146294fc 100644 --- a/drivers/net/can/c_can/c_can_ethtool.c +++ b/drivers/net/can/c_can/c_can_ethtool.c @@ -20,7 +20,9 @@ static void c_can_get_drvinfo(struct net_device *netdev, } static void c_can_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct c_can_priv *priv = netdev_priv(netdev); diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index 52671d1ea17d..faa217f26771 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -403,10 +403,10 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) frame->data[i + 1] = data >> 8; } } - } + stats->rx_bytes += frame->len; + } stats->rx_packets++; - stats->rx_bytes += frame->len; netif_receive_skb(skb); return 0; @@ -477,7 +477,6 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, * transmit as we might race against do_tx(). */ c_can_setup_tx_object(dev, IF_TX, frame, idx); - priv->dlc[idx] = frame->len; can_put_echo_skb(skb, dev, idx, 0); obj = idx + priv->msg_obj_tx_first; c_can_object_put(dev, IF_TX, obj, cmd); @@ -742,8 +741,7 @@ static void c_can_do_tx(struct net_device *dev) * NAPI. We are not transmitting. 
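On the TX side these patches lean on can_get_echo_skb() returning the echoed frame's data length, which is what lets c_can (below), cc770, at91 (above) and grcan drop their private dlc caches. A sketch of the resulting completion handler, with an illustrative demo_ name:

    #include <linux/can/dev.h>

    static void demo_tx_done(struct net_device *dev, unsigned int idx)
    {
            struct net_device_stats *stats = &dev->stats;

            /* returns the frame's data length (0 for RTR frames) */
            stats->tx_bytes += can_get_echo_skb(dev, idx, NULL);
            stats->tx_packets++;
    }
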
*/ c_can_inval_tx_object(dev, IF_NAPI, obj); - can_get_echo_skb(dev, idx, NULL); - bytes += priv->dlc[idx]; + bytes += can_get_echo_skb(dev, idx, NULL); pkts++; } @@ -920,7 +918,6 @@ static int c_can_handle_state_change(struct net_device *dev, unsigned int reg_err_counter; unsigned int rx_err_passive; struct c_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; @@ -996,8 +993,6 @@ static int c_can_handle_state_change(struct net_device *dev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -1064,8 +1059,6 @@ static int c_can_handle_bus_err(struct net_device *dev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; } @@ -1232,8 +1225,7 @@ struct net_device *alloc_c_can_dev(int msg_obj_num) struct c_can_priv *priv; int msg_obj_tx_num = msg_obj_num / 2; - dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num), - msg_obj_tx_num); + dev = alloc_candev(sizeof(*priv), msg_obj_tx_num); if (!dev) return NULL; diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index f8a130f594e2..bb7224cfc6ab 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -489,17 +489,17 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) cf->len = can_cc_dlc2len((config & 0xf0) >> 4); for (i = 0; i < cf->len; i++) cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); - } + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_rx(skb); } static int cc770_err(struct net_device *dev, u8 status) { struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; u8 lec; @@ -571,8 +571,6 @@ static int cc770_err(struct net_device *dev, u8 status) } - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); return 0; @@ -666,7 +664,6 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); - struct can_frame *cf; u8 ctrl1; ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); @@ -698,12 +695,9 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) return; } - cf = (struct can_frame *)priv->tx_skb->data; - stats->tx_bytes += cf->len; - stats->tx_packets++; - can_put_echo_skb(priv->tx_skb, dev, 0, 0); - can_get_echo_skb(dev, 0, NULL); + stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); + stats->tx_packets++; priv->tx_skb = NULL; netif_wake_queue(dev); diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index 0509625c3082..d5fca3bfaf9a 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -4,6 +4,7 @@ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> */ +#include <linux/units.h> #include <linux/can/dev.h> #ifdef CONFIG_CAN_CALC_BITTIMING @@ -81,9 +82,9 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, if (bt->sample_point) { sample_point_nominal = bt->sample_point; } else { - if (bt->bitrate > 800 * CAN_KBPS) + if (bt->bitrate > 800 * KILO /* BPS */) sample_point_nominal = 750; - else if (bt->bitrate > 500 * CAN_KBPS) + else if (bt->bitrate > 500 * KILO /* BPS */) sample_point_nominal = 800; else sample_point_nominal = 875; diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 
e3d840b81357..c192f25f9695 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -136,7 +136,6 @@ EXPORT_SYMBOL_GPL(can_change_state); static void can_restart(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; struct can_frame *cf; int err; @@ -155,9 +154,6 @@ static void can_restart(struct net_device *dev) cf->can_id |= CAN_ERR_RESTARTED; - stats->rx_packets++; - stats->rx_bytes += cf->len; - netif_rx_ni(skb); restart: @@ -300,6 +296,7 @@ EXPORT_SYMBOL_GPL(free_candev); int can_change_mtu(struct net_device *dev, int new_mtu) { struct can_priv *priv = netdev_priv(dev); + u32 ctrlmode_static = can_get_static_ctrlmode(priv); /* Do not allow changing the MTU while running */ if (dev->flags & IFF_UP) @@ -309,7 +306,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu) switch (new_mtu) { case CAN_MTU: /* 'CANFD-only' controllers can not switch to CAN_MTU */ - if (priv->ctrlmode_static & CAN_CTRLMODE_FD) + if (ctrlmode_static & CAN_CTRLMODE_FD) return -EINVAL; priv->ctrlmode &= ~CAN_CTRLMODE_FD; @@ -318,7 +315,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu) case CANFD_MTU: /* check for potential CANFD ability */ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) && - !(priv->ctrlmode_static & CAN_CTRLMODE_FD)) + !(ctrlmode_static & CAN_CTRLMODE_FD)) return -EINVAL; priv->ctrlmode |= CAN_CTRLMODE_FD; diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 95cca4e5251f..7633d98e3912 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -21,6 +21,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, [IFLA_CAN_TDC] = { .type = NLA_NESTED }, + [IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED }, }; static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = { @@ -211,7 +212,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], if (dev->flags & IFF_UP) return -EBUSY; cm = nla_data(data[IFLA_CAN_CTRLMODE]); - ctrlstatic = priv->ctrlmode_static; + ctrlstatic = can_get_static_ctrlmode(priv); maskedflags = cm->flags & cm->mask; /* check whether provided bits are allowed to be passed */ @@ -383,6 +384,12 @@ static size_t can_tdc_get_size(const struct net_device *dev) return size; } +static size_t can_ctrlmode_ext_get_size(void) +{ + return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */ + nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */ +} + static size_t can_get_size(const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -415,6 +422,7 @@ static size_t can_get_size(const struct net_device *dev) priv->data_bitrate_const_cnt); size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */ + size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */ return size; } @@ -472,6 +480,25 @@ err_cancel: return -EMSGSIZE; } +static int can_ctrlmode_ext_fill_info(struct sk_buff *skb, + const struct can_priv *priv) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED, + priv->ctrlmode_supported)) { + nla_nest_cancel(skb, nest); + return -EMSGSIZE; + } + + nla_nest_end(skb, nest); + return 0; +} + static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) 
{ struct can_priv *priv = netdev_priv(dev); @@ -531,7 +558,9 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) sizeof(priv->bitrate_max), &priv->bitrate_max)) || - (can_tdc_fill_info(skb, dev)) + can_tdc_fill_info(skb, dev) || + + can_ctrlmode_ext_fill_info(skb, priv) ) return -EMSGSIZE; diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index 37b0cc65237b..7f80d8e1e750 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -54,8 +54,11 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) struct can_frame *cf = (struct can_frame *)skb->data; work_done++; - stats->rx_packets++; - stats->rx_bytes += cf->len; + if (!(cf->can_id & CAN_ERR_FLAG)) { + stats->rx_packets++; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; + } netif_receive_skb(skb); } diff --git a/drivers/net/can/flexcan/Makefile b/drivers/net/can/flexcan/Makefile new file mode 100644 index 000000000000..89d5695c902e --- /dev/null +++ b/drivers/net/can/flexcan/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o + +flexcan-objs := +flexcan-objs += flexcan-core.o +flexcan-objs += flexcan-ethtool.o diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan/flexcan-core.c index 12b60ad95b02..0bff1884d5cc 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -15,7 +15,6 @@ #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/led.h> -#include <linux/can/rx-offload.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/firmware/imx/sci.h> @@ -33,6 +32,8 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> +#include "flexcan.h" + #define DRV_NAME "flexcan" /* 8 for RX fifo and 2 error handling */ @@ -173,9 +174,9 @@ /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ -#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 -#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 -#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1) +#define FLEXCAN_TX_MB_RESERVED_RX_FIFO 8 +#define FLEXCAN_TX_MB_RESERVED_RX_MAILBOX 0 +#define FLEXCAN_RX_MB_RX_MAILBOX_FIRST (FLEXCAN_TX_MB_RESERVED_RX_MAILBOX + 1) #define FLEXCAN_IFLAG_MB(x) BIT_ULL(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) @@ -206,53 +207,6 @@ #define FLEXCAN_TIMEOUT_US (250) -/* FLEXCAN hardware feature flags - * - * Below is some version info we got: - * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB - * Filter? connected? Passive detection ption in MB Supported? - * MCF5441X FlexCAN2 ? no yes no no yes no 16 - * MX25 FlexCAN2 03.00.00.00 no no no no no no 64 - * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64 - * MX35 FlexCAN2 03.00.00.00 no no no no no no 64 - * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64 - * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64 - * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64 - * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64 - * VF610 FlexCAN3 ? no yes no yes yes? no 64 - * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64 - * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64 - * - * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
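The flexcan rework below replaces the single USE_OFF_TIMESTAMP quirk with a capability model: SUPPPORT_RX_MAILBOX, SUPPPORT_RX_MAILBOX_RTR and SUPPPORT_RX_FIFO describe what the IP core can do, while USE_RX_MAILBOX selects what it currently does. The probe-time sanity rule, RTR-in-mailbox implies mailbox support, reduces to a mask test; a standalone restatement (assumed sketch, using the new flexcan.h names):

    static bool demo_quirks_consistent(u32 quirks)
    {
            const u32 mb  = FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX;
            const u32 rtr = FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR;

            /* RTR reception via mailboxes makes no sense without mailboxes */
            return (quirks & (mb | rtr)) != rtr;
    }
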
- */ - -/* [TR]WRN_INT not connected */ -#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) - /* Disable RX FIFO Global mask */ -#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) -/* Enable EACEN and RRS bit in ctrl2 */ -#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) -/* Disable non-correctable errors interrupt and freeze mode */ -#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) -/* Use timestamp based offloading */ -#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) -/* No interrupt for error passive */ -#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) -/* default to BE register access */ -#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) -/* Setup stop mode with GPR to support wakeup */ -#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8) -/* Support CAN-FD mode */ -#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9) -/* support memory detection and correction */ -#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10) -/* Setup stop mode with SCU firmware to support wakeup */ -#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11) -/* Setup 3 separate interrupts, main, boff and err */ -#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12) -/* Setup 16 mailboxes */ -#define FLEXCAN_QUIRK_NR_MB_16 BIT(13) - /* Structure of the message buffer */ struct flexcan_mb { u32 can_ctrl; @@ -339,106 +293,80 @@ struct flexcan_regs { static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8); -struct flexcan_devtype_data { - u32 quirks; /* quirks needed for different IP cores */ -}; - -struct flexcan_stop_mode { - struct regmap *gpr; - u8 req_gpr; - u8 req_bit; -}; - -struct flexcan_priv { - struct can_priv can; - struct can_rx_offload offload; - struct device *dev; - - struct flexcan_regs __iomem *regs; - struct flexcan_mb __iomem *tx_mb; - struct flexcan_mb __iomem *tx_mb_reserved; - u8 tx_mb_idx; - u8 mb_count; - u8 mb_size; - u8 clk_src; /* clock source of CAN Protocol Engine */ - u8 scu_idx; - - u64 rx_mask; - u64 tx_mask; - u32 reg_ctrl_default; - - struct clk *clk_ipg; - struct clk *clk_per; - const struct flexcan_devtype_data *devtype_data; - struct regulator *reg_xceiver; - struct flexcan_stop_mode stm; - - int irq_boff; - int irq_err; - - /* IPC handle when setup stop mode by System Controller firmware(scfw) */ - struct imx_sc_ipc *sc_ipc_handle; - - /* Read and Write APIs */ - u32 (*read)(void __iomem *addr); - void (*write)(u32 val, void __iomem *addr); -}; - static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16, + FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 | + FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN, + FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx25_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | - FLEXCAN_QUIRK_BROKEN_PERR_STATE, + FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx28_devtype_data = { - .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE, + .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | 
FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR, + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW, + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, }; static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | - FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC, + FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | - FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC, + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD | - FLEXCAN_QUIRK_SUPPORT_ECC, + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -600,7 +528,7 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) priv->write(reg_mcr, ®s->mcr); /* enable stop request */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { ret = flexcan_stop_mode_enable_scfw(priv, true); if (ret < 0) return ret; @@ -619,7 +547,7 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) int ret; /* remove stop request */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { ret = flexcan_stop_mode_enable_scfw(priv, false); if (ret < 0) return ret; @@ -1022,7 +950,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, mb = flexcan_get_mb(priv, n); - if (priv->devtype_data->quirks & 
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { u32 code; do { @@ -1087,7 +1015,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, } mark_as_read: - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), ®s->iflag1); else priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, ®s->iflag1); @@ -1113,7 +1041,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) enum can_state last_state = priv->can.state; /* reception interrupt */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { u64 reg_iflag_rx; int ret; @@ -1173,7 +1101,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) /* state change interrupt or broken error state quirk fix is enabled */ if ((reg_esr & FLEXCAN_ESR_ERR_STATE) || - (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | + (priv->devtype_data.quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE))) flexcan_irq_state(dev, reg_esr); @@ -1195,11 +1123,11 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled */ if ((last_state != priv->can.state) && - (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && + (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: - if (priv->devtype_data->quirks & + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE) flexcan_error_irq_enable(priv); else @@ -1423,26 +1351,26 @@ static int flexcan_rx_offload_setup(struct net_device *dev) else priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_MB_16) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_MB_16) priv->mb_count = 16; else priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + (sizeof(priv->regs->mb[1]) / priv->mb_size); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) priv->tx_mb_reserved = - flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP); + flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_MAILBOX); else priv->tx_mb_reserved = - flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO); + flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_FIFO); priv->tx_mb_idx = priv->mb_count - 1; priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx); priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); priv->offload.mailbox_read = flexcan_mailbox_read; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { + priv->offload.mb_first = FLEXCAN_RX_MB_RX_MAILBOX_FIRST; priv->offload.mb_last = priv->mb_count - 2; priv->rx_mask = GENMASK_ULL(priv->offload.mb_last, @@ -1506,7 +1434,7 @@ static int flexcan_chip_start(struct net_device *dev) if (err) goto out_chip_disable; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_ECC) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_ECC) flexcan_ram_init(dev); flexcan_set_bittiming(dev); @@ -1532,10 +1460,10 @@ static int flexcan_chip_start(struct net_device *dev) /* MCR * * FIFO: - * - disable for timestamp mode + * - disable for mailbox mode * - 
enable for FIFO mode */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) reg_mcr &= ~FLEXCAN_MCR_FEN; else reg_mcr |= FLEXCAN_MCR_FEN; @@ -1586,7 +1514,7 @@ static int flexcan_chip_start(struct net_device *dev) * on most Flexcan cores, too. Otherwise we don't get * any error warning or passive interrupts. */ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; else @@ -1599,7 +1527,7 @@ static int flexcan_chip_start(struct net_device *dev) netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); priv->write(reg_ctrl, ®s->ctrl); - if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { + if ((priv->devtype_data.quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { reg_ctrl2 = priv->read(®s->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS; priv->write(reg_ctrl2, ®s->ctrl2); @@ -1631,7 +1559,7 @@ static int flexcan_chip_start(struct net_device *dev) priv->write(reg_fdctrl, ®s->fdctrl); } - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_EMPTY, @@ -1639,7 +1567,7 @@ static int flexcan_chip_start(struct net_device *dev) } } else { /* clear and invalidate unused mailboxes first */ - for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) { + for (i = FLEXCAN_TX_MB_RESERVED_RX_FIFO; i < priv->mb_count; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, &mb->can_ctrl); @@ -1659,7 +1587,7 @@ static int flexcan_chip_start(struct net_device *dev) priv->write(0x0, ®s->rx14mask); priv->write(0x0, ®s->rx15mask); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_RXFG) priv->write(0x0, ®s->rxfgmask); /* clear acceptance filters */ @@ -1673,7 +1601,7 @@ static int flexcan_chip_start(struct net_device *dev) * This also works around errata e5295 which generates false * positive memory errors and put the device in freeze mode. 
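Every priv->devtype_data-> site in these hunks becomes priv->devtype_data. because probe now copies the devtype table into the private struct: the ethtool rx-rtr flag (flexcan-ethtool.c below) mutates quirks per device, and that write must never reach the shared const fsl_*_devtype_data tables. A sketch of the pattern this enables, with an illustrative helper name:

    static void demo_force_rx_fifo(struct flexcan_priv *priv)
    {
            /* safe: flips the per-device copy, not the const table
             * shared by all flexcan instances */
            priv->devtype_data.quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
    }
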
*/ - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_MECR) { /* Follow the protocol as described in "Detection * and Correction of Memory Errors" to write to * MECR register (step 1 - 5) @@ -1799,7 +1727,7 @@ static int flexcan_open(struct net_device *dev) if (err) goto out_can_rx_offload_disable; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { err = request_irq(priv->irq_boff, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) @@ -1845,7 +1773,7 @@ static int flexcan_close(struct net_device *dev) netif_stop_queue(dev); flexcan_chip_interrupts_disable(dev); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { free_irq(priv->irq_err, dev); free_irq(priv->irq_boff, dev); } @@ -2051,9 +1979,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) priv = netdev_priv(dev); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) ret = flexcan_setup_stop_mode_scfw(pdev); - else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) + else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) ret = flexcan_setup_stop_mode_gpr(pdev); else /* return 0 directly if doesn't support stop mode feature */ @@ -2164,8 +2092,25 @@ static int flexcan_probe(struct platform_device *pdev) return -ENODEV; if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) && - !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) { - dev_err(&pdev->dev, "CAN-FD mode doesn't work with FIFO mode!\n"); + !((devtype_data->quirks & + (FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR | + FLEXCAN_QUIRK_SUPPPORT_RX_FIFO)) == + (FLEXCAN_QUIRK_USE_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR))) { + dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n"); + return -EINVAL; + } + + if ((devtype_data->quirks & + (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) == + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) { + dev_err(&pdev->dev, + "Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n", + devtype_data->quirks); return -EINVAL; } @@ -2177,13 +2122,15 @@ static int flexcan_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); dev->netdev_ops = &flexcan_netdev_ops; + flexcan_set_ethtool_ops(dev); dev->irq = irq; dev->flags |= IFF_ECHO; priv = netdev_priv(dev); + priv->devtype_data = *devtype_data; if (of_property_read_bool(pdev->dev.of_node, "big-endian") || - devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) { + priv->devtype_data.quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) { priv->read = flexcan_read_be; priv->write = flexcan_write_be; } else { @@ -2202,10 +2149,9 @@ static int flexcan_probe(struct platform_device *pdev) priv->clk_ipg = clk_ipg; priv->clk_per = clk_per; priv->clk_src = clk_src; - priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; - if (devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) { + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { priv->irq_boff = platform_get_irq(pdev, 1); if (priv->irq_boff <= 0) { err = -ENODEV; @@ -2218,7 +2164,7 @@ static int flexcan_probe(struct platform_device *pdev) } } - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) { + if 
(priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; priv->can.bittiming_const = &flexcan_fd_bittiming_const; diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c new file mode 100644 index 000000000000..3ae535577700 --- /dev/null +++ b/drivers/net/can/flexcan/flexcan-ethtool.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com> + * Copyright (c) 2022 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * + */ + +#include <linux/can/dev.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#include "flexcan.h" + +static const char flexcan_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define FLEXCAN_PRIV_FLAGS_RX_RTR BIT(0) + "rx-rtr", +}; + +static void +flexcan_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *ext_ack) +{ + const struct flexcan_priv *priv = netdev_priv(ndev); + + ring->rx_max_pending = priv->mb_count; + ring->tx_max_pending = priv->mb_count; + + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) + ring->rx_pending = priv->offload.mb_last - + priv->offload.mb_first + 1; + else + ring->rx_pending = 6; /* RX-FIFO depth is fixed */ + + /* the drive currently supports only on TX buffer */ + ring->tx_pending = 1; +} + +static void +flexcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + memcpy(data, flexcan_priv_flags_strings, + sizeof(flexcan_priv_flags_strings)); + } +} + +static u32 flexcan_get_priv_flags(struct net_device *ndev) +{ + const struct flexcan_priv *priv = netdev_priv(ndev); + u32 priv_flags = 0; + + if (flexcan_active_rx_rtr(priv)) + priv_flags |= FLEXCAN_PRIV_FLAGS_RX_RTR; + + return priv_flags; +} + +static int flexcan_set_priv_flags(struct net_device *ndev, u32 priv_flags) +{ + struct flexcan_priv *priv = netdev_priv(ndev); + u32 quirks = priv->devtype_data.quirks; + + if (priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) { + if (flexcan_supports_rx_mailbox_rtr(priv)) + quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; + else if (flexcan_supports_rx_fifo(priv)) + quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX; + else + quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; + } else { + if (flexcan_supports_rx_mailbox(priv)) + quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; + else + quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX; + } + + if (quirks != priv->devtype_data.quirks && netif_running(ndev)) + return -EBUSY; + + priv->devtype_data.quirks = quirks; + + if (!(priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) && + !flexcan_active_rx_rtr(priv)) + netdev_info(ndev, + "Activating RX mailbox mode, cannot receive RTR frames.\n"); + + return 0; +} + +static int flexcan_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(flexcan_priv_flags_strings); + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops flexcan_ethtool_ops = { + .get_ringparam = flexcan_get_ringparam, + .get_strings = flexcan_get_strings, + .get_priv_flags = flexcan_get_priv_flags, + .set_priv_flags = flexcan_set_priv_flags, + .get_sset_count = flexcan_get_sset_count, +}; + +void flexcan_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &flexcan_ethtool_ops; +} diff --git 
a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h new file mode 100644 index 000000000000..fccdff8b1f0f --- /dev/null +++ b/drivers/net/can/flexcan/flexcan.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 + * flexcan.c - FLEXCAN CAN controller driver + * + * Copyright (c) 2005-2006 Varma Electronics Oy + * Copyright (c) 2009 Sascha Hauer, Pengutronix + * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2014 David Jander, Protonic Holland + * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com> + * + * Based on code originally by Andrey Volkov <avolkov@varma-el.com> + * + */ + +#ifndef _FLEXCAN_H +#define _FLEXCAN_H + +#include <linux/can/rx-offload.h> + +/* FLEXCAN hardware feature flags + * + * Below is some version info we got: + * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB + * Filter? connected? Passive detection ption in MB Supported? + * MCF5441X FlexCAN2 ? no yes no no yes no 16 + * MX25 FlexCAN2 03.00.00.00 no no no no no no 64 + * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64 + * MX35 FlexCAN2 03.00.00.00 no no no no no no 64 + * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64 + * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64 + * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64 + * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64 + * VF610 FlexCAN3 ? no yes no yes yes? no 64 + * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64 + * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64 + * + * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. + */ + +/* [TR]WRN_INT not connected */ +#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) + /* Disable RX FIFO Global mask */ +#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) +/* Enable EACEN and RRS bit in ctrl2 */ +#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) +/* Disable non-correctable errors interrupt and freeze mode */ +#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) +/* Use mailboxes (not FIFO) for RX path */ +#define FLEXCAN_QUIRK_USE_RX_MAILBOX BIT(5) +/* No interrupt for error passive */ +#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) +/* default to BE register access */ +#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) +/* Setup stop mode with GPR to support wakeup */ +#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8) +/* Support CAN-FD mode */ +#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9) +/* support memory detection and correction */ +#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10) +/* Setup stop mode with SCU firmware to support wakeup */ +#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11) +/* Setup 3 separate interrupts, main, boff and err */ +#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12) +/* Setup 16 mailboxes */ +#define FLEXCAN_QUIRK_NR_MB_16 BIT(13) +/* Device supports RX via mailboxes */ +#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX BIT(14) +/* Device supports RTR reception via mailboxes */ +#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR BIT(15) +/* Device supports RX via FIFO */ +#define FLEXCAN_QUIRK_SUPPPORT_RX_FIFO BIT(16) + +struct flexcan_devtype_data { + u32 quirks; /* quirks needed for different IP cores */ +}; + +struct flexcan_stop_mode { + struct regmap *gpr; + u8 req_gpr; + u8 req_bit; +}; + +struct flexcan_priv { + struct can_priv can; + struct can_rx_offload offload; + struct device *dev; + + struct flexcan_regs __iomem *regs; + struct flexcan_mb __iomem *tx_mb; + struct flexcan_mb __iomem *tx_mb_reserved; + u8 tx_mb_idx; + u8 mb_count; + u8 mb_size; + u8 
clk_src; /* clock source of CAN Protocol Engine */ + u8 scu_idx; + + u64 rx_mask; + u64 tx_mask; + u32 reg_ctrl_default; + + struct clk *clk_ipg; + struct clk *clk_per; + struct flexcan_devtype_data devtype_data; + struct regulator *reg_xceiver; + struct flexcan_stop_mode stm; + + int irq_boff; + int irq_err; + + /* IPC handle when setup stop mode by System Controller firmware(scfw) */ + struct imx_sc_ipc *sc_ipc_handle; + + /* Read and Write APIs */ + u32 (*read)(void __iomem *addr); + void (*write)(u32 val, void __iomem *addr); +}; + +void flexcan_set_ethtool_ops(struct net_device *dev); + +static inline bool +flexcan_supports_rx_mailbox(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX; +} + +static inline bool +flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + return (quirks & (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) == + (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR); +} + +static inline bool +flexcan_supports_rx_fifo(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_FIFO; +} + +static inline bool +flexcan_active_rx_rtr(const struct flexcan_priv *priv) +{ + const u32 quirks = priv->devtype_data.quirks; + + if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { + if (quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) + return true; + } else { + /* RX-FIFO is always RTR capable */ + return true; + } + + return false; +} + + +#endif /* _FLEXCAN_H */ diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 78e27940b2af..d0c5a7a60daf 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -255,7 +255,6 @@ struct grcan_priv { struct grcan_dma dma; struct sk_buff **echo_skb; /* We allocate this on our own */ - u8 *txdlc; /* Length of queued frames */ /* The echo skb pointer, pointing into echo_skb and indicating which * frames can be echoed back. 
See the "Notes on the tx cyclic buffer @@ -515,9 +514,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo) if (echo) { /* Normal echo of messages */ stats->tx_packets++; - stats->tx_bytes += priv->txdlc[i]; - priv->txdlc[i] = 0; - can_get_echo_skb(dev, i, NULL); + stats->tx_bytes += can_get_echo_skb(dev, i, NULL); } else { /* For cleanup of untransmitted messages */ can_free_echo_skb(dev, i, NULL); @@ -1062,16 +1059,10 @@ static int grcan_open(struct net_device *dev) priv->can.echo_skb_max = dma->tx.size; priv->can.echo_skb = priv->echo_skb; - priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL); - if (!priv->txdlc) { - err = -ENOMEM; - goto exit_free_echo_skb; - } - /* Get can device up */ err = open_candev(dev); if (err) - goto exit_free_txdlc; + goto exit_free_echo_skb; err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED, dev->name, dev); @@ -1093,8 +1084,6 @@ static int grcan_open(struct net_device *dev) exit_close_candev: close_candev(dev); -exit_free_txdlc: - kfree(priv->txdlc); exit_free_echo_skb: kfree(priv->echo_skb); exit_free_dma_buffers: @@ -1129,7 +1118,6 @@ static int grcan_close(struct net_device *dev) priv->can.echo_skb_max = 0; priv->can.echo_skb = NULL; kfree(priv->echo_skb); - kfree(priv->txdlc); return 0; } @@ -1211,11 +1199,11 @@ static int grcan_receive(struct net_device *dev, int budget) shift = GRCAN_MSG_DATA_SHIFT(i); cf->data[i] = (u8)(slot[j] >> shift); } - } - /* Update statistics and read pointer */ + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_receive_skb(skb); rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); @@ -1447,7 +1435,6 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb, * can_put_echo_skb would be an error unless other measures are * taken. 
*/ - priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */ can_put_echo_skb(skb, dev, slotindex, 0); /* Make sure everything is written before allowing hardware to diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 5bb957a26bc6..b0a3473f211d 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -309,15 +309,15 @@ static void ifi_canfd_read_fifo(struct net_device *ndev) *(u32 *)(cf->data + i) = readl(priv->base + IFI_CANFD_RXFIFO_DATA + i); } + + stats->rx_bytes += cf->len; } + stats->rx_packets++; /* Remove the packet from FIFO */ writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD); writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT); - stats->rx_packets++; - stats->rx_bytes += cf->len; - netif_receive_skb(skb); } @@ -430,8 +430,6 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) priv->base + IFI_CANFD_INTERRUPT); writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR); - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -456,7 +454,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, enum can_state new_state) { struct ifi_canfd_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; @@ -522,8 +519,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 32006dbf5abd..808c105cf8f7 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1285,7 +1285,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod) { struct sk_buff *skb = skb_dequeue(&mod->echoq); struct can_frame *cf; - u8 dlc; + u8 dlc = 0; /* this should never trigger unless there is a driver bug */ if (!skb) { @@ -1294,7 +1294,8 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod) } cf = (struct can_frame *)skb->data; - dlc = cf->len; + if (!(cf->can_id & CAN_RTR_FLAG)) + dlc = cf->len; /* check flag whether this packet has to be looped back */ if (skb->pkt_type != PACKET_LOOPBACK) { @@ -1421,7 +1422,8 @@ static int ican3_recv_skb(struct ican3_dev *mod) /* update statistics, receive the skb */ stats->rx_packets++; - stats->rx_bytes += cf->len; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; netif_receive_skb(skb); err_noalloc: diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 74d9899fc904..017f2d36ffc3 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_SPACK_EWLR BIT(23) #define KVASER_PCIEFD_SPACK_EPLR BIT(24) +/* Kvaser KCAN_EPACK second word */ +#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) + struct kvaser_pciefd; struct kvaser_pciefd_can { @@ -1182,20 +1185,21 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT); - if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) + if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) { cf->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cf->data, data, cf->len); + stats->rx_bytes += cf->len; + } + stats->rx_packets++; + shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = 
ns_to_ktime(div_u64(p->timestamp * 1000, pcie->freq_to_ticks_div)); - stats->rx_bytes += cf->len; - stats->rx_packets++; - return netif_rx(skb); } @@ -1285,7 +1289,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, can->err_rep_cnt++; can->can.can_stats.bus_error++; - stats->rx_errors++; + if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) + stats->tx_errors++; + else + stats->rx_errors++; can->bec.txerr = bec.txerr; can->bec.rxerr = bec.rxerr; @@ -1304,9 +1311,6 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; - stats->rx_packets++; - stats->rx_bytes += cf->len; - netif_rx(skb); return 0; } @@ -1504,8 +1508,6 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, if (skb) { cf->can_id |= CAN_ERR_BUSERROR; - stats->rx_bytes += cf->len; - stats->rx_packets++; netif_rx(skb); } else { stats->rx_dropped++; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2470c47b2e31..5b47cd867783 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -204,16 +204,16 @@ enum m_can_reg { /* Interrupts for version 3.0.x */ #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) -#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X) /* Interrupts for version >= 3.1.x */ #define IR_ERR_LEC_31X (IR_PED | IR_PEA) -#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \ - IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ - IR_RF1L | IR_RF0L) +#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \ + IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \ + IR_RF0L) #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X) /* Interrupt Line Select (ILS) */ @@ -517,21 +517,23 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs) err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA, cf->data, DIV_ROUND_UP(cf->len, 4)); if (err) - goto out_fail; + goto out_free_skb; + + stats->rx_bytes += cf->len; } + stats->rx_packets++; /* acknowledge rx fifo 0 */ m_can_write(cdev, M_CAN_RXF0A, fgi); - stats->rx_packets++; - stats->rx_bytes += cf->len; - timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc); m_can_receive_skb(cdev, skb, timestamp); return 0; +out_free_skb: + kfree_skb(skb); out_fail: netdev_err(dev, "FIFO read returned %d\n", err); return err; @@ -645,9 +647,6 @@ static int m_can_handle_lec_err(struct net_device *dev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->len; - if (cdev->is_peripheral) timestamp = m_can_get_timestamp(cdev); @@ -704,7 +703,6 @@ static int m_can_handle_state_change(struct net_device *dev, enum can_state new_state) { struct m_can_classdev *cdev = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; @@ -769,9 +767,6 @@ static int m_can_handle_state_change(struct net_device *dev, break; } - stats->rx_packets++; - stats->rx_bytes += cf->len; - if (cdev->is_peripheral) timestamp = m_can_get_timestamp(cdev); @@ -810,8 +805,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_ELO) 
- netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); if (irqstatus & IR_BEC) @@ -1463,7 +1456,7 @@ static bool m_can_niso_supported(struct m_can_classdev *cdev) static int m_can_dev_setup(struct m_can_classdev *cdev) { struct net_device *dev = cdev->net; - int m_can_version; + int m_can_version, err; m_can_version = m_can_check_core_release(cdev); /* return if unsupported version */ @@ -1493,21 +1486,37 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) switch (cdev->version) { case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ - can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = &m_can_bittiming_const_30X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X; + err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + if (err) + return err; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_30X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ - can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); + if (err) + return err; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; break; case 32: case 33: /* Support both MCAN version v3.2.x and v3.3.0 */ - cdev->can.bittiming_const = &m_can_bittiming_const_31X; - cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; + + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; cdev->can.ctrlmode_supported |= (m_can_niso_supported(cdev) ? CAN_CTRLMODE_FD_NON_ISO : 0);
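[ Example: the bit_timing/data_timing pointers introduced in the m_can.h hunk below let a glue driver override the generic per-IP-version tables. A minimal, hypothetical sketch follows; all names and limit values are illustrative and not taken from any datasheet. Populating the pointers before m_can_class_register() makes m_can_dev_setup() above prefer them, while leaving them NULL keeps the m_can_bittiming_const_30X/31X defaults. The m_can_pci.c changes further down do exactly this for the EHL PCI devices. ]

#include <linux/can/dev.h>

#include "m_can.h"

/* Illustrative limits only -- not from any real datasheet. */
static const struct can_bittiming_const my_glue_bittiming = {
	.name = "my_glue",
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

static int my_glue_register(struct m_can_classdev *cdev)
{
	/* Prefer our nominal bit timing table; a NULL pointer keeps the
	 * version-specific default selected by m_can_dev_setup().
	 */
	cdev->bit_timing = &my_glue_bittiming;
	cdev->data_timing = NULL;	/* keep the core's CAN FD table */

	return m_can_class_register(cdev);
}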
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index d18b515e6ccc..2c5d40997168 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -85,6 +85,9 @@ struct m_can_classdev { struct sk_buff *tx_skb; struct phy *transceiver; + const struct can_bittiming_const *bit_timing; + const struct can_bittiming_const *data_timing; + struct m_can_ops *ops; int version; diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c index 89cc3d41e952..b56a54d6c5a9 100644 --- a/drivers/net/can/m_can/m_can_pci.c +++ b/drivers/net/can/m_can/m_can_pci.c @@ -18,9 +18,14 @@ #define M_CAN_PCI_MMIO_BAR 0 -#define M_CAN_CLOCK_FREQ_EHL 100000000 #define CTL_CSR_INT_CTL_OFFSET 0x508 +struct m_can_pci_config { + const struct can_bittiming_const *bit_timing; + const struct can_bittiming_const *data_timing; + unsigned int clock_freq; +}; + struct m_can_pci_priv { struct m_can_classdev cdev; @@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *src = priv->base + offset; - ioread32_rep(priv->base + offset, val, val_count); + while (val_count--) { + *(unsigned int *)val = ioread32(src); + val += 4; + src += 4; + } return 0; } @@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, const void *val, size_t val_count) { struct m_can_pci_priv *priv = cdev_to_priv(cdev); + void __iomem *dst = priv->base + offset; - iowrite32_rep(priv->base + offset, val, val_count); + while (val_count--) { + iowrite32(*(unsigned int *)val, dst); + val += 4; + dst += 4; + } return 0; } @@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = { .read_fifo = iomap_read_fifo, }; +static const struct can_bittiming_const m_can_bittiming_const_ehl = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1, +}; + +static const struct can_bittiming_const m_can_data_bittiming_const_ehl = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static const struct m_can_pci_config m_can_pci_ehl = { + .bit_timing = &m_can_bittiming_const_ehl, + .data_timing = &m_can_data_bittiming_const_ehl, + .clock_freq = 200000000, +}; + static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { struct device *dev = &pci->dev; + const struct m_can_pci_config *cfg; struct m_can_classdev *mcan_class; struct m_can_pci_priv *priv; void __iomem *base; @@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (!mcan_class) return -ENOMEM; + cfg = (const struct m_can_pci_config *)id->driver_data; + priv = cdev_to_priv(mcan_class); priv->base = base; @@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) mcan_class->dev = &pci->dev; mcan_class->net->irq = pci_irq_vector(pci, 0); mcan_class->pm_clock_support = 1; - mcan_class->can.clock.freq = id->driver_data; + mcan_class->bit_timing = cfg->bit_timing; + mcan_class->data_timing = cfg->data_timing; + mcan_class->can.clock.freq = 
cfg->clock_freq; mcan_class->ops = &m_can_pci_ops; pci_set_drvdata(pci, mcan_class); @@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops, m_can_pci_suspend, m_can_pci_resume); static const struct pci_device_id m_can_pci_id_table[] = { - { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, }, - { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, }, + { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, }, + { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(pci, m_can_pci_id_table); diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index fa32e418eb29..5b5802fac772 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -401,13 +401,15 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) continue; } - if (canrflg & MSCAN_RXF) + if (canrflg & MSCAN_RXF) { mscan_get_rx_frame(dev, frame); - else if (canrflg & MSCAN_ERR_IF) + stats->rx_packets++; + if (!(frame->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += frame->len; + } else if (canrflg & MSCAN_ERR_IF) { mscan_get_err_frame(dev, frame, canrflg); + } - stats->rx_packets++; - stats->rx_bytes += frame->len; work_done++; netif_receive_skb(skb); } @@ -446,9 +448,9 @@ static irqreturn_t mscan_isr(int irq, void *dev_id) continue; out_8(®s->cantbsel, mask); - stats->tx_bytes += in_8(®s->tx.dlr); + stats->tx_bytes += can_get_echo_skb(dev, entry->id, + NULL); stats->tx_packets++; - can_get_echo_skb(dev, entry->id, NULL); priv->tx_active &= ~mask; list_del(pos); } diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 92a54a5fd4c5..888bef03de09 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -561,9 +561,6 @@ static void pch_can_error(struct net_device *ndev, u32 status) priv->can.state = state; netif_receive_skb(skb); - - stats->rx_packets++; - stats->rx_bytes += cf->len; } static irqreturn_t pch_can_interrupt(int irq, void *dev_id) @@ -680,23 +677,24 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) cf->can_id = id; } - if (id2 & PCH_ID2_DIR) - cf->can_id |= CAN_RTR_FLAG; - cf->len = can_cc_dlc2len((ioread32(&priv->regs-> ifregs[0].mcont)) & 0xF); - for (i = 0; i < cf->len; i += 2) { - data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]); - cf->data[i] = data_reg; - cf->data[i + 1] = data_reg >> 8; - } + if (id2 & PCH_ID2_DIR) { + cf->can_id |= CAN_RTR_FLAG; + } else { + for (i = 0; i < cf->len; i += 2) { + data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]); + cf->data[i] = data_reg; + cf->data[i + 1] = data_reg >> 8; + } - netif_receive_skb(skb); - rcv_pkts++; + stats->rx_bytes += cf->len; + } stats->rx_packets++; + rcv_pkts++; quota--; - stats->rx_bytes += cf->len; + netif_receive_skb(skb); pch_fifo_thresh(priv, obj_num); obj_num++; @@ -709,16 +707,13 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat) { struct pch_can_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &(priv->ndev->stats); - u32 dlc; - can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL); + stats->tx_bytes += can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, + NULL); + stats->tx_packets++; iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND, &priv->regs->ifregs[1].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat); - dlc = can_cc_dlc2len(ioread32(&priv->regs->ifregs[1].mcont) & - PCH_IF_MCONT_DLC); - stats->tx_bytes += dlc; - stats->tx_packets++; if (int_stat == PCH_TX_OBJ_END) 
netif_wake_queue(ndev); } diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index d08718e98e11..b2dea360813d 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -266,10 +266,9 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); - can_get_echo_skb(priv->ndev, msg->client, NULL); /* count bytes of the echo instead of skb */ - stats->tx_bytes += cf_len; + stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL); stats->tx_packets++; /* restart tx queue (a slot is free) */ @@ -310,12 +309,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, if (rx_msg_flags & PUCAN_MSG_EXT_ID) cf->can_id |= CAN_EFF_FLAG; - if (rx_msg_flags & PUCAN_MSG_RTR) + if (rx_msg_flags & PUCAN_MSG_RTR) { cf->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cf->data, msg->d, cf->len); - stats->rx_bytes += cf->len; + stats->rx_bytes += cf->len; + } stats->rx_packets++; pucan_netif_rx(skb, msg->ts_low, msg->ts_high); @@ -409,8 +409,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, return -ENOMEM; } - stats->rx_packets++; - stats->rx_bytes += cf->len; pucan_netif_rx(skb, msg->ts_low, msg->ts_high); return 0; @@ -438,8 +436,6 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv) cf->data[6] = priv->bec.txerr; cf->data[7] = priv->bec.rxerr; - stats->rx_bytes += cf->len; - stats->rx_packets++; netif_rx(skb); return 0; diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 8999ec9455ec..33e37395379d 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -94,7 +94,6 @@ struct rcar_can_priv { struct rcar_can_regs __iomem *regs; struct clk *clk; struct clk *can_clk; - u8 tx_dlc[RCAR_CAN_FIFO_DEPTH]; u32 tx_head; u32 tx_tail; u8 clock_select; @@ -223,7 +222,6 @@ static void tx_failure_cleanup(struct net_device *ndev) static void rcar_can_error(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; u8 eifr, txerr = 0, rxerr = 0; @@ -362,11 +360,8 @@ static void rcar_can_error(struct net_device *ndev) } } - if (skb) { - stats->rx_packets++; - stats->rx_bytes += cf->len; + if (skb) netif_rx(skb); - } } static void rcar_can_tx_done(struct net_device *ndev) @@ -383,10 +378,11 @@ static void rcar_can_tx_done(struct net_device *ndev) if (priv->tx_head - priv->tx_tail <= unsent) break; stats->tx_packets++; - stats->tx_bytes += priv->tx_dlc[priv->tx_tail % - RCAR_CAN_FIFO_DEPTH]; - priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0; - can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL); + stats->tx_bytes += + can_get_echo_skb(ndev, + priv->tx_tail % RCAR_CAN_FIFO_DEPTH, + NULL); + priv->tx_tail++; netif_wake_queue(ndev); } @@ -616,7 +612,6 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb, writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc); - priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len; can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0); priv->tx_head++; /* Start Tx: write 0xff to the TFPCR register to increment @@ -666,12 +661,13 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv) for (dlc = 0; dlc < cf->len; dlc++) cf->data[dlc] = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]); + + stats->rx_bytes += cf->len; } + stats->rx_packets++; can_led_event(priv->ndev, 
CAN_LED_EVENT_RX); - stats->rx_bytes += cf->len; - stats->rx_packets++; netif_receive_skb(skb); } diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index ff9d0f5ae0dd..b7dc1c32875f 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -502,7 +502,6 @@ struct rcar_canfd_channel { struct rcar_canfd_global *gpriv; /* Controller reference */ void __iomem *base; /* Register base address */ struct napi_struct napi; - u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */ u32 tx_head; /* Incremented on xmit */ u32 tx_tail; /* Incremented on xmit done */ u32 channel; /* Channel number */ @@ -1033,8 +1032,6 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, /* Clear channel error interrupts that are handled */ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), RCANFD_CERFL_ERR(~cerfl)); - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1051,9 +1048,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev) sent = priv->tx_tail % RCANFD_FIFO_DEPTH; stats->tx_packets++; - stats->tx_bytes += priv->tx_len[sent]; - priv->tx_len[sent] = 0; - can_get_echo_skb(ndev, sent, NULL); + stats->tx_bytes += can_get_echo_skb(ndev, sent, NULL); spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_tail++; @@ -1174,8 +1169,6 @@ static void rcar_canfd_state_change(struct net_device *ndev, rx_state = txerr <= rxerr ? state : 0; can_change_state(ndev, cf, tx_state, rx_state); - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } } @@ -1465,7 +1458,6 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0)); } - priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len; can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0); spin_lock_irqsave(&priv->tx_lock, flags); @@ -1554,7 +1546,8 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) can_led_event(priv->ndev, CAN_LED_EVENT_RX); - stats->rx_bytes += cf->len; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; stats->rx_packets++; netif_receive_skb(skb); } @@ -1640,8 +1633,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH); if (!ndev) { dev_err(&pdev->dev, "alloc_candev() failed\n"); - err = -ENOMEM; - goto fail; + return -ENOMEM; } priv = netdev_priv(ndev); @@ -1706,7 +1698,9 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, &rcar_canfd_data_bittiming_const; /* Controller starts in CAN FD only mode */ - can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); + err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); + if (err) + goto fail; priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; } else { /* Controller starts in Classical CAN only mode */ @@ -1735,8 +1729,8 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, fail_candev: netif_napi_del(&priv->napi); - free_candev(ndev); fail: + free_candev(ndev); return err; } diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index e21b169c14c0..4642b6d4aaf7 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) free_sja1000dev(dev); } - err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, + if (!card->channels) { + err = -ENODEV; + goto failure_cleanup; + } + + err = request_irq(pdev->irq, 
&ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 3fad54646746..966316479485 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -372,15 +372,16 @@ static void sja1000_rx(struct net_device *dev) } else { for (i = 0; i < cf->len; i++) cf->data[i] = priv->read_reg(priv, dreg++); + + stats->rx_bytes += cf->len; } + stats->rx_packets++; cf->can_id = id; /* release receive buffer */ sja1000_write_cmdreg(priv, CMD_RRB); - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); can_led_event(dev, CAN_LED_EVENT_RX); @@ -487,8 +488,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) can_bus_off(dev); } - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); return 0; @@ -528,10 +527,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) can_free_echo_skb(dev, 0, NULL); } else { /* transmission complete */ - stats->tx_bytes += - priv->read_reg(priv, SJA1000_FI) & 0xf; + stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; - can_get_echo_skb(dev, 0, NULL); } netif_wake_queue(dev); can_led_event(dev, CAN_LED_EVENT_TX); diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index d7c2ec529b8f..f9ec7bd8dfac 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -17,7 +17,6 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_irq.h> #include "sja1000.h" @@ -234,13 +233,15 @@ static int sp_probe(struct platform_device *pdev) if (!addr) return -ENOMEM; - if (of) - irq = irq_of_parse_and_map(of, 0); - else + if (of) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + } else { res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - - if (!irq && !res_irq) - return -ENODEV; + if (!res_irq) + return -ENODEV; + } of_id = of_match_device(sp_of_table, &pdev->dev); if (of_id && of_id->data) { diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 9a4ebda30510..d4c7ce998a34 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -218,7 +218,9 @@ static void slc_bump(struct slcan *sl) skb_put_data(skb, &cf, sizeof(struct can_frame)); sl->dev->stats.rx_packets++; - sl->dev->stats.rx_bytes += cf.len; + if (!(cf.can_id & CAN_RTR_FLAG)) + sl->dev->stats.rx_bytes += cf.len; + netif_rx_ni(skb); } @@ -288,6 +290,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf) if (!(cf->can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf->len; i++) pos = hex_byte_pack_upper(pos, cf->data[i]); + + sl->dev->stats.tx_bytes += cf->len; } *pos++ = '\r'; @@ -304,7 +308,6 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf) actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); sl->xleft = (pos - sl->xbuff) - actual; sl->xhead = sl->xbuff + actual; - sl->dev->stats.tx_bytes += cf->len; } /* Write out any remaining transmit buffer. 
Scheduled when tty is writable */ diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 2e93ee792373..e5c939b63fa6 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -293,7 +293,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia) return 0; platform_failed: - kfree(dev); + platform_device_put(pdev); mem_failed: pcmcia_bad: pcmcia_failed: diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index 7e1536877993..32286f861a19 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -565,18 +565,19 @@ int softing_startstop(struct net_device *dev, int up) if (ret < 0) goto failed; } - /* enable_error_frame */ - /* + + /* enable_error_frame + * * Error reporting is switched off at the moment since * the receiving of them is not yet 100% verified * This should be enabled sooner or later - * - if (error_reporting) { + */ + if (0 && error_reporting) { ret = softing_fct_cmd(card, 51, "enable_error_frame"); if (ret < 0) goto failed; } - */ + /* initialize interface */ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index cfc1325aad10..d74e895bddf7 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -282,7 +282,10 @@ static int softing_handle_1(struct softing *card) skb = priv->can.echo_skb[priv->tx.echo_get]; if (skb) skb->tstamp = ktime; - can_get_echo_skb(netdev, priv->tx.echo_get, NULL); + ++netdev->stats.tx_packets; + netdev->stats.tx_bytes += + can_get_echo_skb(netdev, priv->tx.echo_get, + NULL); ++priv->tx.echo_get; if (priv->tx.echo_get >= TX_ECHO_SKB_MAX) priv->tx.echo_get = 0; @@ -290,9 +293,6 @@ static int softing_handle_1(struct softing *card) --priv->tx.pending; if (card->tx.pending) --card->tx.pending; - ++netdev->stats.tx_packets; - if (!(msg.can_id & CAN_RTR_FLAG)) - netdev->stats.tx_bytes += msg.len; } else { int ret; diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 89d9c986a229..cfcc14fe3e42 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -25,11 +25,11 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> @@ -153,7 +153,6 @@ struct hi3110_priv { u8 *spi_rx_buf; struct sk_buff *tx_skb; - int tx_len; struct workqueue_struct *wq; struct work_struct tx_work; @@ -166,6 +165,8 @@ struct hi3110_priv { #define HI3110_AFTER_SUSPEND_POWER 4 #define HI3110_AFTER_SUSPEND_RESTART 8 int restart_tx; + bool tx_busy; + struct regulator *power; struct regulator *transceiver; struct clk *clk; @@ -175,13 +176,13 @@ static void hi3110_clean(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); - if (priv->tx_skb || priv->tx_len) + if (priv->tx_skb || priv->tx_busy) net->stats.tx_errors++; dev_kfree_skb(priv->tx_skb); - if (priv->tx_len) + if (priv->tx_busy) can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; } /* Note about handling of error return of hi3110_spi_trans: accessing @@ -343,14 +344,15 @@ 
static void hi3110_hw_rx(struct spi_device *spi) /* Data length */ frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F); - if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) + if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) { frame->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF, frame->len); + priv->net->stats.rx_bytes += frame->len; + } priv->net->stats.rx_packets++; - priv->net->stats.rx_bytes += frame->len; can_led_event(priv->net, CAN_LED_EVENT_RX); @@ -368,7 +370,7 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb, struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - if (priv->tx_skb || priv->tx_len) { + if (priv->tx_skb || priv->tx_busy) { dev_err(&spi->dev, "hard_xmit called while tx busy\n"); return NETDEV_TX_BUSY; } @@ -585,7 +587,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws) } else { frame = (struct can_frame *)priv->tx_skb->data; hi3110_hw_tx(spi, frame); - priv->tx_len = 1 + frame->len; + priv->tx_busy = true; can_put_echo_skb(priv->tx_skb, net, 0, 0); priv->tx_skb = NULL; } @@ -720,14 +722,11 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) } } - if (priv->tx_len && statf & HI3110_STAT_TXMTY) { + if (priv->tx_busy && statf & HI3110_STAT_TXMTY) { net->stats.tx_packets++; - net->stats.tx_bytes += priv->tx_len - 1; + net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL); can_led_event(net, CAN_LED_EVENT_TX); - if (priv->tx_len) { - can_get_echo_skb(net, 0, NULL); - priv->tx_len = 0; - } + priv->tx_busy = false; netif_wake_queue(net); } @@ -754,7 +753,7 @@ static int hi3110_open(struct net_device *net) priv->force_quit = 0; priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist, flags, DEVICE_NAME, priv); @@ -828,19 +827,25 @@ MODULE_DEVICE_TABLE(spi, hi3110_id_table); static int hi3110_can_probe(struct spi_device *spi) { - const struct of_device_id *of_id = of_match_device(hi3110_of_match, - &spi->dev); + struct device *dev = &spi->dev; struct net_device *net; struct hi3110_priv *priv; + const void *match; struct clk *clk; - int freq, ret; + u32 freq; + int ret; + + clk = devm_clk_get_optional(&spi->dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n"); - clk = devm_clk_get(&spi->dev, NULL); - if (IS_ERR(clk)) { - dev_err(&spi->dev, "no CAN clock source defined\n"); - return PTR_ERR(clk); + if (clk) { + freq = clk_get_rate(clk); + } else { + ret = device_property_read_u32(dev, "clock-frequency", &freq); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n"); } - freq = clk_get_rate(clk); /* Sanity check */ if (freq > 40000000) @@ -851,11 +856,9 @@ static int hi3110_can_probe(struct spi_device *spi) if (!net) return -ENOMEM; - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_free; - } + ret = clk_prepare_enable(clk); + if (ret) + goto out_free; net->netdev_ops = &hi3110_netdev_ops; net->flags |= IFF_ECHO; @@ -870,8 +873,9 @@ static int hi3110_can_probe(struct spi_device *spi) CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; - if (of_id) - priv->model = (enum hi3110_model)(uintptr_t)of_id->data; + match = device_get_match_data(dev); + if (match) + priv->model = (enum hi3110_model)(uintptr_t)match; else priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; @@ -918,9 +922,7 @@ static int hi3110_can_probe(struct spi_device 
*spi) ret = hi3110_hw_probe(spi); if (ret) { - if (ret == -ENODEV) - dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n", - priv->model); + dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model); goto error_probe; } hi3110_hw_sleep(spi); @@ -938,14 +940,12 @@ static int hi3110_can_probe(struct spi_device *spi) hi3110_power_enable(priv->power, 0); out_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); out_free: free_candev(net); - dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); - return ret; + return dev_err_probe(dev, ret, "Probe failed\n"); } static int hi3110_can_remove(struct spi_device *spi) @@ -957,8 +957,7 @@ static int hi3110_can_remove(struct spi_device *spi) hi3110_power_enable(priv->power, 0); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); free_candev(net); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 0579ab74f728..025e07cb7439 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -237,7 +237,6 @@ struct mcp251x_priv { u8 *spi_rx_buf; struct sk_buff *tx_skb; - int tx_len; struct workqueue_struct *wq; struct work_struct tx_work; @@ -250,6 +249,8 @@ struct mcp251x_priv { #define AFTER_SUSPEND_POWER 4 #define AFTER_SUSPEND_RESTART 8 int restart_tx; + bool tx_busy; + struct regulator *power; struct regulator *transceiver; struct clk *clk; @@ -272,13 +273,13 @@ static void mcp251x_clean(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); - if (priv->tx_skb || priv->tx_len) + if (priv->tx_skb || priv->tx_busy) net->stats.tx_errors++; dev_kfree_skb(priv->tx_skb); - if (priv->tx_len) + if (priv->tx_busy) can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; } /* Note about handling of error return of mcp251x_spi_trans: accessing @@ -600,9 +601,6 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv) gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names); gpio->names = mcp251x_gpio_names; gpio->can_sleep = true; -#ifdef CONFIG_OF_GPIO - gpio->of_node = priv->spi->dev.of_node; -#endif return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv); } @@ -733,10 +731,12 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) } /* Data length */ frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); - memcpy(frame->data, buf + RXBDAT_OFF, frame->len); + if (!(frame->can_id & CAN_RTR_FLAG)) { + memcpy(frame->data, buf + RXBDAT_OFF, frame->len); + priv->net->stats.rx_bytes += frame->len; + } priv->net->stats.rx_packets++; - priv->net->stats.rx_bytes += frame->len; can_led_event(priv->net, CAN_LED_EVENT_RX); @@ -786,7 +786,7 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb, struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; - if (priv->tx_skb || priv->tx_len) { + if (priv->tx_skb || priv->tx_busy) { dev_warn(&spi->dev, "hard_xmit called while tx busy\n"); return NETDEV_TX_BUSY; } @@ -1011,7 +1011,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws) if (frame->len > CAN_FRAME_MAX_DATA_LEN) frame->len = CAN_FRAME_MAX_DATA_LEN; mcp251x_hw_tx(spi, frame, 0); - priv->tx_len = 1 + frame->len; + priv->tx_busy = true; can_put_echo_skb(priv->tx_skb, net, 0, 0); priv->tx_skb = NULL; } @@ -1177,12 +1177,12 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) break; if (intf & CANINTF_TX) { - net->stats.tx_packets++; - net->stats.tx_bytes += priv->tx_len - 1; can_led_event(net, 
CAN_LED_EVENT_TX); - if (priv->tx_len) { - can_get_echo_skb(net, 0, NULL); - priv->tx_len = 0; + if (priv->tx_busy) { + net->stats.tx_packets++; + net->stats.tx_bytes += can_get_echo_skb(net, 0, + NULL); + priv->tx_busy = false; } netif_wake_queue(net); } @@ -1209,7 +1209,7 @@ static int mcp251x_open(struct net_device *net) priv->force_quit = 0; priv->tx_skb = NULL; - priv->tx_len = 0; + priv->tx_busy = false; if (!dev_fwnode(&spi->dev)) flags = IRQF_TRIGGER_FALLING; diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile index 3cba3b9447ea..a83d685d64e0 100644 --- a/drivers/net/can/spi/mcp251xfd/Makefile +++ b/drivers/net/can/spi/mcp251xfd/Makefile @@ -3,9 +3,14 @@ obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o mcp251xfd-objs := +mcp251xfd-objs += mcp251xfd-chip-fifo.o mcp251xfd-objs += mcp251xfd-core.o mcp251xfd-objs += mcp251xfd-crc16.o mcp251xfd-objs += mcp251xfd-regmap.o +mcp251xfd-objs += mcp251xfd-ring.o +mcp251xfd-objs += mcp251xfd-rx.o +mcp251xfd-objs += mcp251xfd-tef.o mcp251xfd-objs += mcp251xfd-timestamp.o +mcp251xfd-objs += mcp251xfd-tx.o mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c new file mode 100644 index 000000000000..2f9a623d381d --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <linux/bitfield.h> + +#include "mcp251xfd.h" + +static int +mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring) +{ + u32 fifo_con; + + /* Enable RXOVIE on _all_ RX FIFOs, not just the last one. + * + * FIFOs hit by a RX MAB overflow and RXOVIE enabled will + * generate a RXOVIF, use this to properly detect RX MAB + * overflows. 
+ */ + fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, + ring->obj_num - 1) | + MCP251XFD_REG_FIFOCON_RXTSEN | + MCP251XFD_REG_FIFOCON_RXOVIE | + MCP251XFD_REG_FIFOCON_TFNRFNIE; + + if (mcp251xfd_is_fd_mode(priv)) + fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_64); + else + fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_8); + + return regmap_write(priv->map_reg, + MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con); +} + +static int +mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring) +{ + u32 fltcon; + + fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) | + MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr); + + return regmap_update_bits(priv->map_reg, + MCP251XFD_REG_FLTCON(ring->nr >> 2), + MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr), + fltcon); +} + +int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) +{ + const struct mcp251xfd_tx_ring *tx_ring = priv->tx; + const struct mcp251xfd_rx_ring *rx_ring; + u32 val; + int err, n; + + /* TEF */ + val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, + tx_ring->obj_num - 1) | + MCP251XFD_REG_TEFCON_TEFTSEN | + MCP251XFD_REG_TEFCON_TEFOVIE | + MCP251XFD_REG_TEFCON_TEFNEIE; + + err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val); + if (err) + return err; + + /* FIFO 1 - TX */ + val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, + tx_ring->obj_num - 1) | + MCP251XFD_REG_FIFOCON_TXEN | + MCP251XFD_REG_FIFOCON_TXATIE; + + if (mcp251xfd_is_fd_mode(priv)) + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_64); + else + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, + MCP251XFD_REG_FIFOCON_PLSIZE_8); + + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, + MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT); + else + val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, + MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED); + + err = regmap_write(priv->map_reg, + MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO), + val); + if (err) + return err; + + /* RX FIFOs */ + mcp251xfd_for_each_rx_ring(priv, rx_ring, n) { + err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring); + if (err) + return err; + + err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index e16dc482f327..b5986df6eca0 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -20,8 +20,6 @@ #include <linux/pm_runtime.h> #include <linux/property.h> -#include <asm/unaligned.h> - #include "mcp251xfd.h" #define DEVICE_NAME "mcp251xfd" @@ -180,330 +178,6 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv) return 0; } -static inline u8 -mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv, - union mcp251xfd_write_reg_buf *write_reg_buf, - const u16 reg, const u32 mask, const u32 val) -{ - u8 first_byte, last_byte, len; - u8 *data; - __le32 val_le32; - - first_byte = mcp251xfd_first_byte_set(mask); - last_byte = mcp251xfd_last_byte_set(mask); - len = last_byte - first_byte + 1; - - data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte); - val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); - memcpy(data, &val_le32, len); - - if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) { - u16 crc; - - 
mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd, - len); - /* CRC */ - len += sizeof(write_reg_buf->crc.cmd); - crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len); - put_unaligned_be16(crc, (void *)write_reg_buf + len); - - /* Total length */ - len += sizeof(write_reg_buf->crc.crc); - } else { - len += sizeof(write_reg_buf->nocrc.cmd); - } - - return len; -} - -static inline int -mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, - u8 *tef_tail) -{ - u32 tef_ua; - int err; - - err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua); - if (err) - return err; - - *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj); - - return 0; -} - -static inline int -mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv, - u8 *tx_tail) -{ - u32 fifo_sta; - int err; - - err = regmap_read(priv->map_reg, - MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO), - &fifo_sta); - if (err) - return err; - - *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); - - return 0; -} - -static inline int -mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_rx_ring *ring, - u8 *rx_head) -{ - u32 fifo_sta; - int err; - - err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), - &fifo_sta); - if (err) - return err; - - *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); - - return 0; -} - -static inline int -mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_rx_ring *ring, - u8 *rx_tail) -{ - u32 fifo_ua; - int err; - - err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr), - &fifo_ua); - if (err) - return err; - - fifo_ua -= ring->base - MCP251XFD_RAM_START; - *rx_tail = fifo_ua / ring->obj_size; - - return 0; -} - -static void -mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_tx_ring *ring, - struct mcp251xfd_tx_obj *tx_obj, - const u8 rts_buf_len, - const u8 n) -{ - struct spi_transfer *xfer; - u16 addr; - - /* FIFO load */ - addr = mcp251xfd_get_tx_obj_addr(ring, n); - if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) - mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd, - addr); - else - mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd, - addr); - - xfer = &tx_obj->xfer[0]; - xfer->tx_buf = &tx_obj->buf; - xfer->len = 0; /* actual len is assigned on the fly */ - xfer->cs_change = 1; - xfer->cs_change_delay.value = 0; - xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; - - /* FIFO request to send */ - xfer = &tx_obj->xfer[1]; - xfer->tx_buf = &ring->rts_buf; - xfer->len = rts_buf_len; - - /* SPI message */ - spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer, - ARRAY_SIZE(tx_obj->xfer)); -} - -static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) -{ - struct mcp251xfd_tef_ring *tef_ring; - struct mcp251xfd_tx_ring *tx_ring; - struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; - struct mcp251xfd_tx_obj *tx_obj; - struct spi_transfer *xfer; - u32 val; - u16 addr; - u8 len; - int i, j; - - netdev_reset_queue(priv->ndev); - - /* TEF */ - tef_ring = priv->tef; - tef_ring->head = 0; - tef_ring->tail = 0; - - /* FIFO increment TEF tail pointer */ - addr = MCP251XFD_REG_TEFCON; - val = MCP251XFD_REG_TEFCON_UINC; - len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, - addr, val, val); - - for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { - xfer = &tef_ring->uinc_xfer[j]; - xfer->tx_buf = &tef_ring->uinc_buf; - xfer->len = len; - xfer->cs_change = 1; 
- xfer->cs_change_delay.value = 0; - xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; - } - - /* "cs_change == 1" on the last transfer results in an active - * chip select after the complete SPI message. This causes the - * controller to interpret the next register access as - * data. Set "cs_change" of the last transfer to "0" to - * properly deactivate the chip select at the end of the - * message. - */ - xfer->cs_change = 0; - - /* TX */ - tx_ring = priv->tx; - tx_ring->head = 0; - tx_ring->tail = 0; - tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num); - - /* FIFO request to send */ - addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO); - val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC; - len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf, - addr, val, val); - - mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i) - mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i); - - /* RX */ - mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { - rx_ring->head = 0; - rx_ring->tail = 0; - rx_ring->nr = i; - rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i); - - if (!prev_rx_ring) - rx_ring->base = - mcp251xfd_get_tx_obj_addr(tx_ring, - tx_ring->obj_num); - else - rx_ring->base = prev_rx_ring->base + - prev_rx_ring->obj_size * - prev_rx_ring->obj_num; - - prev_rx_ring = rx_ring; - - /* FIFO increment RX tail pointer */ - addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); - val = MCP251XFD_REG_FIFOCON_UINC; - len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf, - addr, val, val); - - for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { - xfer = &rx_ring->uinc_xfer[j]; - xfer->tx_buf = &rx_ring->uinc_buf; - xfer->len = len; - xfer->cs_change = 1; - xfer->cs_change_delay.value = 0; - xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; - } - - /* "cs_change == 1" on the last transfer results in an - * active chip select after the complete SPI - * message. This causes the controller to interpret - * the next register access as data. Set "cs_change" - * of the last transfer to "0" to properly deactivate - * the chip select at the end of the message. 
- */ - xfer->cs_change = 0; - } -} - -static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) -{ - int i; - - for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) { - kfree(priv->rx[i]); - priv->rx[i] = NULL; - } -} - -static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) -{ - struct mcp251xfd_tx_ring *tx_ring; - struct mcp251xfd_rx_ring *rx_ring; - int tef_obj_size, tx_obj_size, rx_obj_size; - int tx_obj_num; - int ram_free, i; - - tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj); - /* listen-only mode works like FD mode */ - if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) { - tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD; - tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd); - rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd); - } else { - tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN; - tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can); - rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can); - } - - tx_ring = priv->tx; - tx_ring->obj_num = tx_obj_num; - tx_ring->obj_size = tx_obj_size; - - ram_free = MCP251XFD_RAM_SIZE - tx_obj_num * - (tef_obj_size + tx_obj_size); - - for (i = 0; - i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size; - i++) { - int rx_obj_num; - - rx_obj_num = ram_free / rx_obj_size; - rx_obj_num = min(1 << (fls(rx_obj_num) - 1), - MCP251XFD_RX_OBJ_NUM_MAX); - - rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num, - GFP_KERNEL); - if (!rx_ring) { - mcp251xfd_ring_free(priv); - return -ENOMEM; - } - rx_ring->obj_num = rx_obj_num; - rx_ring->obj_size = rx_obj_size; - priv->rx[i] = rx_ring; - - ram_free -= rx_ring->obj_num * rx_ring->obj_size; - } - priv->rx_ring_num = i; - - netdev_dbg(priv->ndev, - "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n", - tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num, - tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num); - - mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { - netdev_dbg(priv->ndev, - "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n", - i, rx_ring->obj_num, rx_ring->obj_size, - rx_ring->obj_size * rx_ring->obj_num); - } - - netdev_dbg(priv->ndev, - "FIFO setup: free: %d bytes\n", - ram_free); - - return 0; -} - static inline int mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode) { @@ -838,108 +512,6 @@ static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv) return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); } -static int -mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_rx_ring *ring) -{ - u32 fifo_con; - - /* Enable RXOVIE on _all_ RX FIFOs, not just the last one. - * - * FIFOs hit by a RX MAB overflow and RXOVIE enabled will - * generate a RXOVIF, use this to properly detect RX MAB - * overflows. 
- */ - fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, - ring->obj_num - 1) | - MCP251XFD_REG_FIFOCON_RXTSEN | - MCP251XFD_REG_FIFOCON_RXOVIE | - MCP251XFD_REG_FIFOCON_TFNRFNIE; - - if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) - fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, - MCP251XFD_REG_FIFOCON_PLSIZE_64); - else - fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, - MCP251XFD_REG_FIFOCON_PLSIZE_8); - - return regmap_write(priv->map_reg, - MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con); -} - -static int -mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_rx_ring *ring) -{ - u32 fltcon; - - fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) | - MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr); - - return regmap_update_bits(priv->map_reg, - MCP251XFD_REG_FLTCON(ring->nr >> 2), - MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr), - fltcon); -} - -static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) -{ - const struct mcp251xfd_tx_ring *tx_ring = priv->tx; - const struct mcp251xfd_rx_ring *rx_ring; - u32 val; - int err, n; - - /* TEF */ - val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, - tx_ring->obj_num - 1) | - MCP251XFD_REG_TEFCON_TEFTSEN | - MCP251XFD_REG_TEFCON_TEFOVIE | - MCP251XFD_REG_TEFCON_TEFNEIE; - - err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val); - if (err) - return err; - - /* FIFO 1 - TX */ - val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, - tx_ring->obj_num - 1) | - MCP251XFD_REG_FIFOCON_TXEN | - MCP251XFD_REG_FIFOCON_TXATIE; - - if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) - val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, - MCP251XFD_REG_FIFOCON_PLSIZE_64); - else - val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, - MCP251XFD_REG_FIFOCON_PLSIZE_8); - - if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) - val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, - MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT); - else - val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, - MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED); - - err = regmap_write(priv->map_reg, - MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO), - val); - if (err) - return err; - - /* RX FIFOs */ - mcp251xfd_for_each_rx_ring(priv, rx_ring, n) { - err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring); - if (err) - return err; - - err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring); - if (err) - return err; - } - - return 0; -} - static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv) { struct mcp251xfd_ecc *ecc = &priv->ecc; @@ -968,18 +540,10 @@ static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv) return err; } -static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv) -{ - struct mcp251xfd_ecc *ecc = &priv->ecc; - - ecc->ecc_stat = 0; -} - static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv) { u8 mode; - if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK; else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) @@ -1186,433 +750,6 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev, return __mcp251xfd_get_berr_counter(ndev, bec); } -static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) -{ - u8 tef_tail_chip, tef_tail; - int err; - - if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY)) - return 0; - - err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip); - if (err) - return err; - - tef_tail = mcp251xfd_get_tef_tail(priv); - if (tef_tail_chip != tef_tail) { - 
netdev_err(priv->ndev,
- "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
- tef_tail_chip, tef_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u8 rx_tail_chip, rx_tail;
- int err;
-
- if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
- return 0;
-
- err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
- if (err)
- return err;
-
- rx_tail = mcp251xfd_get_rx_tail(ring);
- if (rx_tail_chip != rx_tail) {
- netdev_err(priv->ndev,
- "RX tail of chip (%d) and ours (%d) inconsistent.\n",
- rx_tail_chip, rx_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- u32 tef_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
- if (err)
- return err;
-
- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
- netdev_err(priv->ndev,
- "Transmit Event FIFO buffer overflow.\n");
- return -ENOBUFS;
- }
-
- netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty",
- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
-
- /* The Sequence Number in the TEF doesn't match our tef_tail. */
- return -EAGAIN;
-}
-
-static int
-mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- unsigned int *frame_len_ptr)
-{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct sk_buff *skb;
- u32 seq, seq_masked, tef_tail_masked, tef_tail;
-
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
- hw_tef_obj->flags);
-
- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
- * compare 7 bits, this should be enough to detect
- * not-yet-completed, i.e. old TEF objects.
- */
- seq_masked = seq &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- tef_tail_masked = priv->tef->tail &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- if (seq_masked != tef_tail_masked)
- return mcp251xfd_handle_tefif_recover(priv, seq);
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- skb = priv->can.echo_skb[tef_tail];
- if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
- stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&priv->offload,
- tef_tail, hw_tef_obj->ts,
- frame_len_ptr);
- stats->tx_packets++;
- priv->tef->tail++;
-
- return 0;
-}
-
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- unsigned int new_head;
- u8 chip_tx_tail;
- int err;
-
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
- if (err)
- return err;
-
- /* chip_tx_tail is the next TX-Object sent by the HW.
- * The new TEF head must be >= the old head, ...
- */
- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef->head)
- new_head += tx_ring->obj_num;
-
- /* ... but it cannot exceed the TX head. */
- priv->tef->head = min(new_head, tx_ring->head);
-
- return mcp251xfd_check_tef_tail(priv);
-}
-
-static inline int
-mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- const u8 offset, const u8 len)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- const int val_bytes = regmap_get_val_bytes(priv->map_rx);
-
- if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
- (offset > tx_ring->obj_num ||
- len > tx_ring->obj_num ||
- offset + len > tx_ring->obj_num)) {
- netdev_err(priv->ndev,
- "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
- tx_ring->obj_num, offset, len);
- return -ERANGE;
- }
-
- return regmap_bulk_read(priv->map_rx,
- mcp251xfd_get_tef_obj_addr(offset),
- hw_tef_obj,
- sizeof(*hw_tef_obj) / val_bytes * len);
-}
-
-static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
- unsigned int total_frame_len = 0;
- u8 tef_tail, len, l;
- int err, i;
-
- err = mcp251xfd_tef_ring_update(priv);
- if (err)
- return err;
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- len = mcp251xfd_get_tef_len(priv);
- l = mcp251xfd_get_tef_linear_len(priv);
- err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
- if (err)
- return err;
-
- if (l < len) {
- err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
- if (err)
- return err;
- }
-
- for (i = 0; i < len; i++) {
- unsigned int frame_len = 0;
-
- err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
- /* -EAGAIN means the Sequence Number in the TEF
- * doesn't match our tef_tail. This can happen if we
- * read the TEF objects too early. Leave the loop and
- * let the interrupt handler call us again.
- */
- if (err == -EAGAIN)
- goto out_netif_wake_queue;
- if (err)
- return err;
-
- total_frame_len += frame_len;
- }
-
- out_netif_wake_queue:
- len = i; /* number of handled good TEFs */
- if (len) {
- struct mcp251xfd_tef_ring *ring = priv->tef;
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- int offset;
-
- /* Increment the TEF FIFO tail pointer 'len' times in
- * a single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
- if (err)
- return err;
-
- tx_ring->tail += len;
- netdev_completed_queue(priv->ndev, len, total_frame_len);
-
- err = mcp251xfd_check_tef_tail(priv);
- if (err)
- return err;
- }
-
- mcp251xfd_ecc_tefif_successful(priv);
-
- if (mcp251xfd_get_tx_free(priv->tx)) {
- /* Make sure that anybody stopping the queue after
- * this sees the new tx_ring->tail.
- */
- smp_mb();
- netif_wake_queue(priv->ndev);
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
-{
- u32 new_head;
- u8 chip_rx_head;
- int err;
-
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
- if (err)
- return err;
-
- /* chip_rx_head is the next RX-Object filled by the HW.
- * The new RX head must be >= the old head.
- */ - new_head = round_down(ring->head, ring->obj_num) + chip_rx_head; - if (new_head <= ring->head) - new_head += ring->obj_num; - - ring->head = new_head; - - return mcp251xfd_check_rx_tail(priv, ring); -} - -static void -mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, - struct sk_buff *skb) -{ - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - u8 dlc; - - if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) { - u32 sid, eid; - - eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id); - sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id); - - cfd->can_id = CAN_EFF_FLAG | - FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) | - FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid); - } else { - cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, - hw_rx_obj->id); - } - - dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags); - - /* CANFD */ - if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) { - - if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI) - cfd->flags |= CANFD_ESI; - - if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS) - cfd->flags |= CANFD_BRS; - - cfd->len = can_fd_dlc2len(dlc); - } else { - if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR) - cfd->can_id |= CAN_RTR_FLAG; - - can_frame_set_cc_len((struct can_frame *)cfd, dlc, - priv->can.ctrlmode); - } - - if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) - memcpy(cfd->data, hw_rx_obj->data, cfd->len); - - mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); -} - -static int -mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, - struct mcp251xfd_rx_ring *ring, - const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj) -{ - struct net_device_stats *stats = &priv->ndev->stats; - struct sk_buff *skb; - struct canfd_frame *cfd; - int err; - - if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) - skb = alloc_canfd_skb(priv->ndev, &cfd); - else - skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd); - - if (!skb) { - stats->rx_dropped++; - return 0; - } - - mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); - err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts); - if (err) - stats->rx_fifo_errors++; - - return 0; -} - -static inline int -mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_rx_ring *ring, - struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, - const u8 offset, const u8 len) -{ - const int val_bytes = regmap_get_val_bytes(priv->map_rx); - int err; - - err = regmap_bulk_read(priv->map_rx, - mcp251xfd_get_rx_obj_addr(ring, offset), - hw_rx_obj, - len * ring->obj_size / val_bytes); - - return err; -} - -static int -mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, - struct mcp251xfd_rx_ring *ring) -{ - struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj; - u8 rx_tail, len; - int err, i; - - err = mcp251xfd_rx_ring_update(priv, ring); - if (err) - return err; - - while ((len = mcp251xfd_get_rx_linear_len(ring))) { - int offset; - - rx_tail = mcp251xfd_get_rx_tail(ring); - - err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, - rx_tail, len); - if (err) - return err; - - for (i = 0; i < len; i++) { - err = mcp251xfd_handle_rxif_one(priv, ring, - (void *)hw_rx_obj + - i * ring->obj_size); - if (err) - return err; - } - - /* Increment the RX FIFO tail pointer 'len' times in a - * single SPI message. - * - * Note: - * Calculate offset, so that the SPI transfer ends on - * the last message of the uinc_xfer array, which has - * "cs_change == 0", to properly deactivate the chip - * select. 
- */ - offset = ARRAY_SIZE(ring->uinc_xfer) - len; - err = spi_sync_transfer(priv->spi, - ring->uinc_xfer + offset, len); - if (err) - return err; - - ring->tail += len; - } - - return 0; -} - -static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv) -{ - struct mcp251xfd_rx_ring *ring; - int err, n; - - mcp251xfd_for_each_rx_ring(priv, ring, n) { - err = mcp251xfd_handle_rxif_ring(priv, ring); - if (err) - return err; - } - - return 0; -} - static struct sk_buff * mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, struct can_frame **cf, u32 *timestamp) @@ -1653,12 +790,15 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) /* If SERRIF is active, there was a RX MAB overflow. */ if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) { - netdev_info(priv->ndev, - "RX-%d: MAB overflow detected.\n", - ring->nr); + if (net_ratelimit()) + netdev_dbg(priv->ndev, + "RX-%d: MAB overflow detected.\n", + ring->nr); } else { - netdev_info(priv->ndev, - "RX-%d: FIFO overflow.\n", ring->nr); + if (net_ratelimit()) + netdev_dbg(priv->ndev, + "RX-%d: FIFO overflow.\n", + ring->nr); } err = regmap_update_bits(priv->map_reg, @@ -2311,212 +1451,23 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) return handled; } -static inline struct -mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring) -{ - u8 tx_head; - - tx_head = mcp251xfd_get_tx_head(tx_ring); - - return &tx_ring->obj[tx_head]; -} - -static void -mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, - struct mcp251xfd_tx_obj *tx_obj, - const struct sk_buff *skb, - unsigned int seq) -{ - const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj; - union mcp251xfd_tx_obj_load_buf *load_buf; - u8 dlc; - u32 id, flags; - int len_sanitized = 0, len; - - if (cfd->can_id & CAN_EFF_FLAG) { - u32 sid, eid; - - sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id); - eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id); - - id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) | - FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid); - - flags = MCP251XFD_OBJ_FLAGS_IDE; - } else { - id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id); - flags = 0; - } - - /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't - * harm, only the lower 7 bits will be transferred into the - * TEF object. 
- */ - flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq); - - if (cfd->can_id & CAN_RTR_FLAG) - flags |= MCP251XFD_OBJ_FLAGS_RTR; - else - len_sanitized = canfd_sanitize_len(cfd->len); - - /* CANFD */ - if (can_is_canfd_skb(skb)) { - if (cfd->flags & CANFD_ESI) - flags |= MCP251XFD_OBJ_FLAGS_ESI; - - flags |= MCP251XFD_OBJ_FLAGS_FDF; - - if (cfd->flags & CANFD_BRS) - flags |= MCP251XFD_OBJ_FLAGS_BRS; - - dlc = can_fd_len2dlc(cfd->len); - } else { - dlc = can_get_cc_dlc((struct can_frame *)cfd, - priv->can.ctrlmode); - } - - flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc); - - load_buf = &tx_obj->buf; - if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) - hw_tx_obj = &load_buf->crc.hw_tx_obj; - else - hw_tx_obj = &load_buf->nocrc.hw_tx_obj; - - put_unaligned_le32(id, &hw_tx_obj->id); - put_unaligned_le32(flags, &hw_tx_obj->flags); - - /* Copy data */ - memcpy(hw_tx_obj->data, cfd->data, cfd->len); - - /* Clear unused data at end of CAN frame */ - if (MCP251XFD_SANITIZE_CAN && len_sanitized) { - int pad_len; - - pad_len = len_sanitized - cfd->len; - if (pad_len) - memset(hw_tx_obj->data + cfd->len, 0x0, pad_len); - } - - /* Number of bytes to be written into the RAM of the controller */ - len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags); - if (MCP251XFD_SANITIZE_CAN) - len += round_up(len_sanitized, sizeof(u32)); - else - len += round_up(cfd->len, sizeof(u32)); - - if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) { - u16 crc; - - mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd, - len); - /* CRC */ - len += sizeof(load_buf->crc.cmd); - crc = mcp251xfd_crc16_compute(&load_buf->crc, len); - put_unaligned_be16(crc, (void *)load_buf + len); - - /* Total length */ - len += sizeof(load_buf->crc.crc); - } else { - len += sizeof(load_buf->nocrc.cmd); - } - - tx_obj->xfer[0].len = len; -} - -static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv, - struct mcp251xfd_tx_obj *tx_obj) -{ - return spi_async(priv->spi, &tx_obj->msg); -} - -static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv, - struct mcp251xfd_tx_ring *tx_ring) -{ - if (mcp251xfd_get_tx_free(tx_ring) > 0) - return false; - - netif_stop_queue(priv->ndev); - - /* Memory barrier before checking tx_free (head and tail) */ - smp_mb(); - - if (mcp251xfd_get_tx_free(tx_ring) == 0) { - netdev_dbg(priv->ndev, - "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", - tx_ring->head, tx_ring->tail, - tx_ring->head - tx_ring->tail); - - return true; - } - - netif_start_queue(priv->ndev); - - return false; -} - -static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, - struct net_device *ndev) -{ - struct mcp251xfd_priv *priv = netdev_priv(ndev); - struct mcp251xfd_tx_ring *tx_ring = priv->tx; - struct mcp251xfd_tx_obj *tx_obj; - unsigned int frame_len; - u8 tx_head; - int err; - - if (can_dropped_invalid_skb(ndev, skb)) - return NETDEV_TX_OK; - - if (mcp251xfd_tx_busy(priv, tx_ring)) - return NETDEV_TX_BUSY; - - tx_obj = mcp251xfd_get_tx_obj_next(tx_ring); - mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head); - - /* Stop queue if we occupy the complete TX FIFO */ - tx_head = mcp251xfd_get_tx_head(tx_ring); - tx_ring->head++; - if (mcp251xfd_get_tx_free(tx_ring) == 0) - netif_stop_queue(ndev); - - frame_len = can_skb_get_frame_len(skb); - err = can_put_echo_skb(skb, ndev, tx_head, frame_len); - if (!err) - netdev_sent_queue(priv->ndev, frame_len); - - err = mcp251xfd_tx_obj_write(priv, tx_obj); - if (err) - goto out_err; - - return NETDEV_TX_OK; - - out_err: 
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); - - return NETDEV_TX_OK; -} - static int mcp251xfd_open(struct net_device *ndev) { struct mcp251xfd_priv *priv = netdev_priv(ndev); const struct spi_device *spi = priv->spi; int err; - err = pm_runtime_get_sync(ndev->dev.parent); - if (err < 0) { - pm_runtime_put_noidle(ndev->dev.parent); + err = open_candev(ndev); + if (err) return err; - } - err = open_candev(ndev); + err = pm_runtime_resume_and_get(ndev->dev.parent); if (err) - goto out_pm_runtime_put; + goto out_close_candev; err = mcp251xfd_ring_alloc(priv); if (err) - goto out_close_candev; + goto out_pm_runtime_put; err = mcp251xfd_transceiver_enable(priv); if (err) @@ -2552,11 +1503,11 @@ static int mcp251xfd_open(struct net_device *ndev) mcp251xfd_transceiver_disable(priv); out_mcp251xfd_ring_free: mcp251xfd_ring_free(priv); - out_close_candev: - close_candev(ndev); out_pm_runtime_put: mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); pm_runtime_put(ndev->dev.parent); + out_close_candev: + close_candev(ndev); return err; } @@ -2625,7 +1576,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) if (!mcp251xfd_is_251X(priv) && priv->devtype_data.model != devtype_data->model) { netdev_info(ndev, - "Detected %s, but firmware specifies a %s. Fixing up.", + "Detected %s, but firmware specifies a %s. Fixing up.\n", __mcp251xfd_get_model_str(devtype_data->model), mcp251xfd_get_model_str(priv)); } @@ -2662,7 +1613,7 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) return 0; netdev_info(priv->ndev, - "RX_INT active after softreset, disabling RX_INT support."); + "RX_INT active after softreset, disabling RX_INT support.\n"); devm_gpiod_put(&priv->spi->dev, priv->rx_int); priv->rx_int = NULL; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index 297491516a26..7b120c716228 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -250,7 +250,6 @@ mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const b return 0; } - static int mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv, struct spi_message *msg, unsigned int data_len) diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c new file mode 100644 index 000000000000..92f9e9b01289 --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <asm/unaligned.h> + +#include "mcp251xfd.h" + +static inline u8 +mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv, + union mcp251xfd_write_reg_buf *write_reg_buf, + const u16 reg, const u32 mask, const u32 val) +{ + u8 first_byte, last_byte, len; + u8 *data; + __le32 val_le32; + + first_byte = mcp251xfd_first_byte_set(mask); + last_byte = mcp251xfd_last_byte_set(mask); + len = last_byte - first_byte + 1; + + data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte); + val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); + memcpy(data, &val_le32, len); + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) { 
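+ /* Controllers with the CRC_REG quirk expect register write
+ * commands to carry the payload length and a trailing CRC-16
+ * over command and data; both are appended below.
+ */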
+ u16 crc; + + mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd, + len); + /* CRC */ + len += sizeof(write_reg_buf->crc.cmd); + crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len); + put_unaligned_be16(crc, (void *)write_reg_buf + len); + + /* Total length */ + len += sizeof(write_reg_buf->crc.crc); + } else { + len += sizeof(write_reg_buf->nocrc.cmd); + } + + return len; +} + +static void +mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_tx_ring *ring, + struct mcp251xfd_tx_obj *tx_obj, + const u8 rts_buf_len, + const u8 n) +{ + struct spi_transfer *xfer; + u16 addr; + + /* FIFO load */ + addr = mcp251xfd_get_tx_obj_addr(ring, n); + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) + mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd, + addr); + else + mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd, + addr); + + xfer = &tx_obj->xfer[0]; + xfer->tx_buf = &tx_obj->buf; + xfer->len = 0; /* actual len is assigned on the fly */ + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + + /* FIFO request to send */ + xfer = &tx_obj->xfer[1]; + xfer->tx_buf = &ring->rts_buf; + xfer->len = rts_buf_len; + + /* SPI message */ + spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer, + ARRAY_SIZE(tx_obj->xfer)); +} + +void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_tef_ring *tef_ring; + struct mcp251xfd_tx_ring *tx_ring; + struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; + struct mcp251xfd_tx_obj *tx_obj; + struct spi_transfer *xfer; + u32 val; + u16 addr; + u8 len; + int i, j; + + netdev_reset_queue(priv->ndev); + + /* TEF */ + tef_ring = priv->tef; + tef_ring->head = 0; + tef_ring->tail = 0; + + /* FIFO increment TEF tail pointer */ + addr = MCP251XFD_REG_TEFCON; + val = MCP251XFD_REG_TEFCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, + addr, val, val); + + for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { + xfer = &tef_ring->uinc_xfer[j]; + xfer->tx_buf = &tef_ring->uinc_buf; + xfer->len = len; + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + } + + /* "cs_change == 1" on the last transfer results in an active + * chip select after the complete SPI message. This causes the + * controller to interpret the next register access as + * data. Set "cs_change" of the last transfer to "0" to + * properly deactivate the chip select at the end of the + * message. 
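+ * On all earlier transfers cs_change == 1 only toggles the
+ * chip select between the transfers, which is intended here:
+ * each UINC write must be its own SPI command.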
+ */ + xfer->cs_change = 0; + + /* TX */ + tx_ring = priv->tx; + tx_ring->head = 0; + tx_ring->tail = 0; + tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num); + + /* FIFO request to send */ + addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO); + val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf, + addr, val, val); + + mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i) + mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i); + + /* RX */ + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { + rx_ring->head = 0; + rx_ring->tail = 0; + rx_ring->nr = i; + rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i); + + if (!prev_rx_ring) + rx_ring->base = + mcp251xfd_get_tx_obj_addr(tx_ring, + tx_ring->obj_num); + else + rx_ring->base = prev_rx_ring->base + + prev_rx_ring->obj_size * + prev_rx_ring->obj_num; + + prev_rx_ring = rx_ring; + + /* FIFO increment RX tail pointer */ + addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); + val = MCP251XFD_REG_FIFOCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf, + addr, val, val); + + for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { + xfer = &rx_ring->uinc_xfer[j]; + xfer->tx_buf = &rx_ring->uinc_buf; + xfer->len = len; + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + } + + /* "cs_change == 1" on the last transfer results in an + * active chip select after the complete SPI + * message. This causes the controller to interpret + * the next register access as data. Set "cs_change" + * of the last transfer to "0" to properly deactivate + * the chip select at the end of the message. + */ + xfer->cs_change = 0; + } +} + +void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) +{ + int i; + + for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) { + kfree(priv->rx[i]); + priv->rx[i] = NULL; + } +} + +int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_tx_ring *tx_ring; + struct mcp251xfd_rx_ring *rx_ring; + int tef_obj_size, tx_obj_size, rx_obj_size; + int tx_obj_num; + int ram_free, i; + + tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj); + if (mcp251xfd_is_fd_mode(priv)) { + tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD; + tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd); + rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd); + } else { + tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN; + tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can); + rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can); + } + + tx_ring = priv->tx; + tx_ring->obj_num = tx_obj_num; + tx_ring->obj_size = tx_obj_size; + + ram_free = MCP251XFD_RAM_SIZE - tx_obj_num * + (tef_obj_size + tx_obj_size); + + for (i = 0; + i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size; + i++) { + int rx_obj_num; + + rx_obj_num = ram_free / rx_obj_size; + rx_obj_num = min(1 << (fls(rx_obj_num) - 1), + MCP251XFD_RX_OBJ_NUM_MAX); + + rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num, + GFP_KERNEL); + if (!rx_ring) { + mcp251xfd_ring_free(priv); + return -ENOMEM; + } + rx_ring->obj_num = rx_obj_num; + rx_ring->obj_size = rx_obj_size; + priv->rx[i] = rx_ring; + + ram_free -= rx_ring->obj_num * rx_ring->obj_size; + } + priv->rx_ring_num = i; + + netdev_dbg(priv->ndev, + "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n", + tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num, + tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num); + + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { + netdev_dbg(priv->ndev, + "FIFO 
setup: RX-%d: %d*%d bytes = %d bytes\n",
+ i, rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_size * rx_ring->obj_num);
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %d bytes\n",
+ ram_free);
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
new file mode 100644
index 000000000000..63f2526464b3
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline int
+mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_head)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_tail)
+{
+ u32 fifo_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
+ &fifo_ua);
+ if (err)
+ return err;
+
+ fifo_ua -= ring->base - MCP251XFD_RAM_START;
+ *rx_tail = fifo_ua / ring->obj_size;
+
+ return 0;
+}
+
+static int
+mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u8 rx_tail_chip, rx_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
+ if (err)
+ return err;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+ if (rx_tail_chip != rx_tail) {
+ netdev_err(priv->ndev,
+ "RX tail of chip (%d) and ours (%d) inconsistent.\n",
+ rx_tail_chip, rx_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ u32 new_head;
+ u8 chip_rx_head;
+ int err;
+
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
+ if (err)
+ return err;
+
+ /* chip_rx_head is the next RX-Object filled by the HW.
+ * The new RX head must be >= the old head.
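+ * Example: with obj_num = 16, head = 30 and chip_rx_head = 2,
+ * round_down(30, 16) + 2 = 18 is not above the old head, so
+ * obj_num is added once and the new head becomes 34.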
+ */ + new_head = round_down(ring->head, ring->obj_num) + chip_rx_head; + if (new_head <= ring->head) + new_head += ring->obj_num; + + ring->head = new_head; + + return mcp251xfd_check_rx_tail(priv, ring); +} + +static void +mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, + struct sk_buff *skb) +{ + struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + u8 dlc; + + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) { + u32 sid, eid; + + eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id); + sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id); + + cfd->can_id = CAN_EFF_FLAG | + FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) | + FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid); + } else { + cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, + hw_rx_obj->id); + } + + dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags); + + /* CANFD */ + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) { + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI) + cfd->flags |= CANFD_ESI; + + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS) + cfd->flags |= CANFD_BRS; + + cfd->len = can_fd_dlc2len(dlc); + } else { + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR) + cfd->can_id |= CAN_RTR_FLAG; + + can_frame_set_cc_len((struct can_frame *)cfd, dlc, + priv->can.ctrlmode); + } + + if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) + memcpy(cfd->data, hw_rx_obj->data, cfd->len); + + mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); +} + +static int +mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, + struct mcp251xfd_rx_ring *ring, + const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct sk_buff *skb; + struct canfd_frame *cfd; + int err; + + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) + skb = alloc_canfd_skb(priv->ndev, &cfd); + else + skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd); + + if (!skb) { + stats->rx_dropped++; + return 0; + } + + mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); + err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts); + if (err) + stats->rx_fifo_errors++; + + return 0; +} + +static inline int +mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring, + struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, + const u8 offset, const u8 len) +{ + const int val_bytes = regmap_get_val_bytes(priv->map_rx); + int err; + + err = regmap_bulk_read(priv->map_rx, + mcp251xfd_get_rx_obj_addr(ring, offset), + hw_rx_obj, + len * ring->obj_size / val_bytes); + + return err; +} + +static int +mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, + struct mcp251xfd_rx_ring *ring) +{ + struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj; + u8 rx_tail, len; + int err, i; + + err = mcp251xfd_rx_ring_update(priv, ring); + if (err) + return err; + + while ((len = mcp251xfd_get_rx_linear_len(ring))) { + int offset; + + rx_tail = mcp251xfd_get_rx_tail(ring); + + err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, + rx_tail, len); + if (err) + return err; + + for (i = 0; i < len; i++) { + err = mcp251xfd_handle_rxif_one(priv, ring, + (void *)hw_rx_obj + + i * ring->obj_size); + if (err) + return err; + } + + /* Increment the RX FIFO tail pointer 'len' times in a + * single SPI message. + * + * Note: + * Calculate offset, so that the SPI transfer ends on + * the last message of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. 
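+ * Example: for len = 3 only the last three uinc_xfer entries
+ * are sent, and just the final one has cs_change == 0.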
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+ }
+
+ return 0;
+}
+
+int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_rx_ring *ring;
+ int err, n;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ err = mcp251xfd_handle_rxif_ring(priv, ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
new file mode 100644
index 000000000000..406166005b99
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline int
+mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tef_tail)
+{
+ u32 tef_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
+ if (err)
+ return err;
+
+ *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
+
+ return 0;
+}
+
+static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ u8 tef_tail_chip, tef_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ if (tef_tail_chip != tef_tail) {
+ netdev_err(priv->ndev,
+ "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
+ tef_tail_chip, tef_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ u32 tef_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
+ if (err)
+ return err;
+
+ if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
+ netdev_err(priv->ndev,
+ "Transmit Event FIFO buffer overflow.\n");
+ return -ENOBUFS;
+ }
+
+ netdev_info(priv->ndev,
+ "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
+ tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
+ "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
+ "not empty" : "empty",
+ seq, priv->tef->tail, priv->tef->head, tx_ring->head);
+
+ /* The Sequence Number in the TEF doesn't match our tef_tail. */
+ return -EAGAIN;
+}
+
+static int
+mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ u32 seq, seq_masked, tef_tail_masked, tef_tail;
+
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ hw_tef_obj->flags);
+
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits, this should be enough to detect
+ * not-yet-completed, i.e. old TEF objects.
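+ * On a mismatch the TEF object is considered not yet
+ * completed and mcp251xfd_handle_tefif_recover() is called.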
+ */
+ seq_masked = seq &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ tef_tail_masked = priv->tef->tail &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ if (seq_masked != tef_tail_masked)
+ return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ skb = priv->can.echo_skb[tef_tail];
+ if (skb)
+ mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ tef_tail, hw_tef_obj->ts,
+ frame_len_ptr);
+ stats->tx_packets++;
+ priv->tef->tail++;
+
+ return 0;
+}
+
+static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ unsigned int new_head;
+ u8 chip_tx_tail;
+ int err;
+
+ err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ /* chip_tx_tail is the next TX-Object sent by the HW.
+ * The new TEF head must be >= the old head, ...
+ */
+ new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
+ if (new_head <= priv->tef->head)
+ new_head += tx_ring->obj_num;
+
+ /* ... but it cannot exceed the TX head. */
+ priv->tef->head = min(new_head, tx_ring->head);
+
+ return mcp251xfd_check_tef_tail(priv);
+}
+
+static inline int
+mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ const u8 offset, const u8 len)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ (offset > tx_ring->obj_num ||
+ len > tx_ring->obj_num ||
+ offset + len > tx_ring->obj_num)) {
+ netdev_err(priv->ndev,
+ "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
+ tx_ring->obj_num, offset, len);
+ return -ERANGE;
+ }
+
+ return regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_tef_obj_addr(offset),
+ hw_tef_obj,
+ sizeof(*hw_tef_obj) / val_bytes * len);
+}
+
+static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+
+ ecc->ecc_stat = 0;
+}
+
+int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ unsigned int total_frame_len = 0;
+ u8 tef_tail, len, l;
+ int err, i;
+
+ err = mcp251xfd_tef_ring_update(priv);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ len = mcp251xfd_get_tef_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv);
+ err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+
+ if (l < len) {
+ err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ unsigned int frame_len = 0;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
+ /* -EAGAIN means the Sequence Number in the TEF
+ * doesn't match our tef_tail. This can happen if we
+ * read the TEF objects too early. Leave the loop and
+ * let the interrupt handler call us again.
+ */
+ if (err == -EAGAIN)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+
+ total_frame_len += frame_len;
+ }
+
+ out_netif_wake_queue:
+ len = i; /* number of handled good TEFs */
+ if (len) {
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int offset;
+
+ /* Increment the TEF FIFO tail pointer 'len' times in
+ * a single SPI message.
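+ * Batching avoids a separate SPI transaction per
+ * handled TEF object.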
+ * + * Note: + * Calculate offset, so that the SPI transfer ends on + * the last message of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. + */ + offset = ARRAY_SIZE(ring->uinc_xfer) - len; + err = spi_sync_transfer(priv->spi, + ring->uinc_xfer + offset, len); + if (err) + return err; + + tx_ring->tail += len; + netdev_completed_queue(priv->ndev, len, total_frame_len); + + err = mcp251xfd_check_tef_tail(priv); + if (err) + return err; + } + + mcp251xfd_ecc_tefif_successful(priv); + + if (mcp251xfd_get_tx_free(priv->tx)) { + /* Make sure that anybody stopping the queue after + * this sees the new tx_ring->tail. + */ + smp_mb(); + netif_wake_queue(priv->ndev); + } + + return 0; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c new file mode 100644 index 000000000000..ffb6c36b7d9b --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// mcp251xfd - Microchip MCP251xFD Family CAN controller driver +// +// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Marc Kleine-Budde <kernel@pengutronix.de> +// +// Based on: +// +// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface +// +// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org> +// + +#include <asm/unaligned.h> +#include <linux/bitfield.h> + +#include "mcp251xfd.h" + +static inline struct +mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring) +{ + u8 tx_head; + + tx_head = mcp251xfd_get_tx_head(tx_ring); + + return &tx_ring->obj[tx_head]; +} + +static void +mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_obj *tx_obj, + const struct sk_buff *skb, + unsigned int seq) +{ + const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj; + union mcp251xfd_tx_obj_load_buf *load_buf; + u8 dlc; + u32 id, flags; + int len_sanitized = 0, len; + + if (cfd->can_id & CAN_EFF_FLAG) { + u32 sid, eid; + + sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id); + eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id); + + id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) | + FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid); + + flags = MCP251XFD_OBJ_FLAGS_IDE; + } else { + id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id); + flags = 0; + } + + /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't + * harm, only the lower 7 bits will be transferred into the + * TEF object. 
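+ * The sequence number written here is compared against
+ * tef_tail when the corresponding TEF object is handled.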
+ */ + flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq); + + if (cfd->can_id & CAN_RTR_FLAG) + flags |= MCP251XFD_OBJ_FLAGS_RTR; + else + len_sanitized = canfd_sanitize_len(cfd->len); + + /* CANFD */ + if (can_is_canfd_skb(skb)) { + if (cfd->flags & CANFD_ESI) + flags |= MCP251XFD_OBJ_FLAGS_ESI; + + flags |= MCP251XFD_OBJ_FLAGS_FDF; + + if (cfd->flags & CANFD_BRS) + flags |= MCP251XFD_OBJ_FLAGS_BRS; + + dlc = can_fd_len2dlc(cfd->len); + } else { + dlc = can_get_cc_dlc((struct can_frame *)cfd, + priv->can.ctrlmode); + } + + flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc); + + load_buf = &tx_obj->buf; + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) + hw_tx_obj = &load_buf->crc.hw_tx_obj; + else + hw_tx_obj = &load_buf->nocrc.hw_tx_obj; + + put_unaligned_le32(id, &hw_tx_obj->id); + put_unaligned_le32(flags, &hw_tx_obj->flags); + + /* Copy data */ + memcpy(hw_tx_obj->data, cfd->data, cfd->len); + + /* Clear unused data at end of CAN frame */ + if (MCP251XFD_SANITIZE_CAN && len_sanitized) { + int pad_len; + + pad_len = len_sanitized - cfd->len; + if (pad_len) + memset(hw_tx_obj->data + cfd->len, 0x0, pad_len); + } + + /* Number of bytes to be written into the RAM of the controller */ + len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags); + if (MCP251XFD_SANITIZE_CAN) + len += round_up(len_sanitized, sizeof(u32)); + else + len += round_up(cfd->len, sizeof(u32)); + + if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) { + u16 crc; + + mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd, + len); + /* CRC */ + len += sizeof(load_buf->crc.cmd); + crc = mcp251xfd_crc16_compute(&load_buf->crc, len); + put_unaligned_be16(crc, (void *)load_buf + len); + + /* Total length */ + len += sizeof(load_buf->crc.crc); + } else { + len += sizeof(load_buf->nocrc.cmd); + } + + tx_obj->xfer[0].len = len; +} + +static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_obj *tx_obj) +{ + return spi_async(priv->spi, &tx_obj->msg); +} + +static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv, + struct mcp251xfd_tx_ring *tx_ring) +{ + if (mcp251xfd_get_tx_free(tx_ring) > 0) + return false; + + netif_stop_queue(priv->ndev); + + /* Memory barrier before checking tx_free (head and tail) */ + smp_mb(); + + if (mcp251xfd_get_tx_free(tx_ring) == 0) { + netdev_dbg(priv->ndev, + "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", + tx_ring->head, tx_ring->tail, + tx_ring->head - tx_ring->tail); + + return true; + } + + netif_start_queue(priv->ndev); + + return false; +} + +netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mcp251xfd_priv *priv = netdev_priv(ndev); + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + struct mcp251xfd_tx_obj *tx_obj; + unsigned int frame_len; + u8 tx_head; + int err; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (mcp251xfd_tx_busy(priv, tx_ring)) + return NETDEV_TX_BUSY; + + tx_obj = mcp251xfd_get_tx_obj_next(tx_ring); + mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head); + + /* Stop queue if we occupy the complete TX FIFO */ + tx_head = mcp251xfd_get_tx_head(tx_ring); + tx_ring->head++; + if (mcp251xfd_get_tx_free(tx_ring) == 0) + netif_stop_queue(ndev); + + frame_len = can_skb_get_frame_len(skb); + err = can_put_echo_skb(skb, ndev, tx_head, frame_len); + if (!err) + netdev_sent_queue(priv->ndev, frame_len); + + err = mcp251xfd_tx_obj_write(priv, tx_obj); + if (err) + goto out_err; + + return NETDEV_TX_OK; + + out_err: + 
netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err); + + return NETDEV_TX_OK; +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 0f322dabaf65..f551c900803e 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -10,6 +10,7 @@ #ifndef _MCP251XFD_H #define _MCP251XFD_H +#include <linux/bitfield.h> #include <linux/can/core.h> #include <linux/can/dev.h> #include <linux/can/rx-offload.h> @@ -625,6 +626,12 @@ MCP251XFD_IS(2517); MCP251XFD_IS(2518); MCP251XFD_IS(251X); +static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv) +{ + /* listen-only mode works like FD mode */ + return priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD); +} + static inline u8 mcp251xfd_first_byte_set(u32 mask) { return (mask & 0x0000ffff) ? @@ -761,6 +768,24 @@ mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n) return ring->base + ring->obj_size * n; } +static inline int +mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv, + u8 *tx_tail) +{ + u32 fifo_sta; + int err; + + err = regmap_read(priv->map_reg, + MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO), + &fifo_sta); + if (err) + return err; + + *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + return 0; +} + static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv) { return priv->tef->head & (priv->tx->obj_num - 1); @@ -849,15 +874,24 @@ mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring) (n) < (priv)->rx_ring_num; \ (n)++, (ring) = *((priv)->rx + (n))) -int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv); +int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv); u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, const void *data, size_t data_size); u16 mcp251xfd_crc16_compute(const void *data, size_t data_size); +int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv); +void mcp251xfd_ring_init(struct mcp251xfd_priv *priv); +void mcp251xfd_ring_free(struct mcp251xfd_priv *priv); +int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv); +int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv); +int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv); void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, struct sk_buff *skb, u32 timestamp); void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); +netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev); + #if IS_ENABLED(CONFIG_DEV_COREDUMP) void mcp251xfd_dump(const struct mcp251xfd_priv *priv); #else diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 54aa7c25c4de..25d6d81ab4f4 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -61,6 +61,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/reset.h> #define DRV_NAME "sun4i_can" @@ -200,10 +201,20 @@ #define SUN4I_CAN_MAX_IRQ 20 #define SUN4I_MODE_MAX_RETRIES 100 +/** + * struct sun4ican_quirks - Differences between SoC variants. + * + * @has_reset: SoC needs reset deasserted. + */ +struct sun4ican_quirks { + bool has_reset; +}; + struct sun4ican_priv { struct can_priv can; void __iomem *base; struct clk *clk; + struct reset_control *reset; spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */ }; @@ -490,18 +501,20 @@ static void sun4i_can_rx(struct net_device *dev) } /* remote frame ? 
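* (a remote frame carries no payload, only the arbitration ID)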
*/ - if (fi & SUN4I_MSG_RTR_FLAG) + if (fi & SUN4I_MSG_RTR_FLAG) { id |= CAN_RTR_FLAG; - else + } else { for (i = 0; i < cf->len; i++) cf->data[i] = readl(priv->base + dreg + i * 4); + stats->rx_bytes += cf->len; + } + stats->rx_packets++; + cf->can_id = id; sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF); - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); can_led_event(dev, CAN_LED_EVENT_RX); @@ -622,13 +635,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) can_bus_off(dev); } - if (likely(skb)) { - stats->rx_packets++; - stats->rx_bytes += cf->len; + if (likely(skb)) netif_rx(skb); - } else { + else return -ENOMEM; - } return 0; } @@ -651,11 +661,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) if (isrc & SUN4I_INT_TBUF_VLD) { /* transmission complete interrupt */ - stats->tx_bytes += - readl(priv->base + - SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf; + stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; - can_get_echo_skb(dev, 0, NULL); netif_wake_queue(dev); can_led_event(dev, CAN_LED_EVENT_TX); } @@ -702,6 +709,13 @@ static int sun4ican_open(struct net_device *dev) goto exit_irq; } + /* software reset deassert */ + err = reset_control_deassert(priv->reset); + if (err) { + netdev_err(dev, "could not deassert CAN reset\n"); + goto exit_soft_reset; + } + /* turn on clocking for CAN peripheral block */ err = clk_prepare_enable(priv->clk); if (err) { @@ -723,6 +737,8 @@ static int sun4ican_open(struct net_device *dev) exit_can_start: clk_disable_unprepare(priv->clk); exit_clock: + reset_control_assert(priv->reset); +exit_soft_reset: free_irq(dev->irq, dev); exit_irq: close_candev(dev); @@ -736,6 +752,7 @@ static int sun4ican_close(struct net_device *dev) netif_stop_queue(dev); sun4i_can_stop(dev); clk_disable_unprepare(priv->clk); + reset_control_assert(priv->reset); free_irq(dev->irq, dev); close_candev(dev); @@ -750,9 +767,27 @@ static const struct net_device_ops sun4ican_netdev_ops = { .ndo_start_xmit = sun4ican_start_xmit, }; +static const struct sun4ican_quirks sun4ican_quirks_a10 = { + .has_reset = false, +}; + +static const struct sun4ican_quirks sun4ican_quirks_r40 = { + .has_reset = true, +}; + static const struct of_device_id sun4ican_of_match[] = { - {.compatible = "allwinner,sun4i-a10-can"}, - {}, + { + .compatible = "allwinner,sun4i-a10-can", + .data = &sun4ican_quirks_a10 + }, { + .compatible = "allwinner,sun7i-a20-can", + .data = &sun4ican_quirks_a10 + }, { + .compatible = "allwinner,sun8i-r40-can", + .data = &sun4ican_quirks_r40 + }, { + /* sentinel */ + }, }; MODULE_DEVICE_TABLE(of, sun4ican_of_match); @@ -771,10 +806,28 @@ static int sun4ican_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct clk *clk; + struct reset_control *reset = NULL; void __iomem *addr; int err, irq; struct net_device *dev; struct sun4ican_priv *priv; + const struct sun4ican_quirks *quirks; + + quirks = of_device_get_match_data(&pdev->dev); + if (!quirks) { + dev_err(&pdev->dev, "failed to determine the quirks to use\n"); + err = -ENODEV; + goto exit; + } + + if (quirks->has_reset) { + reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(reset)) { + dev_err(&pdev->dev, "unable to request reset\n"); + err = PTR_ERR(reset); + goto exit; + } + } clk = of_clk_get(np, 0); if (IS_ERR(clk)) { @@ -818,6 +871,7 @@ static int sun4ican_probe(struct platform_device *pdev) CAN_CTRLMODE_3_SAMPLES; priv->base = addr; priv->clk = clk; + priv->reset = reset; 
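+ /* reset is NULL on SoCs without a reset line; the
+ * reset_control_*() calls treat a NULL handle as a no-op.
+ */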
spin_lock_init(&priv->cmdreg_lock); platform_set_drvdata(pdev, dev); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 353062ead98f..ff31b993ab17 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -859,7 +859,6 @@ static int ti_hecc_probe(struct platform_device *pdev) struct net_device *ndev = (struct net_device *)0; struct ti_hecc_priv *priv; struct device_node *np = pdev->dev.of_node; - struct resource *irq; struct regulator *reg_xceiver; int err = -ENODEV; @@ -904,9 +903,9 @@ static int ti_hecc_probe(struct platform_device *pdev) goto probe_exit_candev; } - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq) { - dev_err(&pdev->dev, "No irq resource\n"); + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + err = ndev->irq; goto probe_exit_candev; } @@ -920,7 +919,6 @@ static int ti_hecc_probe(struct platform_device *pdev) priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; spin_lock_init(&priv->mbx_lock); - ndev->irq = irq->start; ndev->flags |= IFF_ECHO; platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 2b5302e72435..7bedceffdfa3 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -230,7 +230,6 @@ struct ems_tx_urb_context { struct ems_usb *dev; u32 echo_index; - u8 dlc; }; struct ems_usb { @@ -320,10 +319,11 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) } else { for (i = 0; i < cf->len; i++) cf->data[i] = msg->msg.can_msg.msg[i]; - } + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_rx(skb); } @@ -397,8 +397,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) stats->rx_errors++; } - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } @@ -518,9 +516,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb) /* transmission complete interrupt */ netdev->stats.tx_packets++; - netdev->stats.tx_bytes += context->dlc; - - can_get_echo_skb(netdev, context->echo_index, NULL); + netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, + NULL); /* Release context */ context->echo_index = MAX_TX_URBS; @@ -806,7 +803,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne context->dev = dev; context->echo_index = i; - context->dlc = cf->len; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, size, ems_usb_write_bulk_callback, context); diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index c6068a251fbe..286daaaea0b8 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -183,7 +183,6 @@ struct esd_usb2_net_priv; struct esd_tx_urb_context { struct esd_usb2_net_priv *priv; u32 echo_index; - int len; /* CAN payload length */ }; struct esd_usb2 { @@ -293,8 +292,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } } @@ -334,10 +331,11 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, } else { for (i = 0; i < cf->len; i++) cf->data[i] = msg->msg.rx.data[i]; - } + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_rx(skb); } @@ -358,8 +356,8 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv, if (!msg->msg.txdone.status) { stats->tx_packets++; - stats->tx_bytes 
+= context->len; - can_get_echo_skb(netdev, context->echo_index, NULL); + stats->tx_bytes += can_get_echo_skb(netdev, context->echo_index, + NULL); } else { stats->tx_errors++; can_free_echo_skb(netdev, context->echo_index, NULL); @@ -784,7 +782,6 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, context->priv = priv; context->echo_index = i; - context->len = cf->len; /* hnd must not be 0 - MSB is stripped in txdone handling */ msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */ diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c index 14e360c9f2c9..1bcdcece5ec7 100644 --- a/drivers/net/can/usb/etas_es58x/es581_4.c +++ b/drivers/net/can/usb/etas_es58x/es581_4.c @@ -10,6 +10,7 @@ */ #include <linux/kernel.h> +#include <linux/units.h> #include <asm/unaligned.h> #include "es58x_core.h" @@ -469,8 +470,8 @@ const struct es58x_parameters es581_4_param = { .bittiming_const = &es581_4_bittiming_const, .data_bittiming_const = NULL, .tdc_const = NULL, - .bitrate_max = 1 * CAN_MBPS, - .clock = {.freq = 50 * CAN_MHZ}, + .bitrate_max = 1 * MEGA /* BPS */, + .clock = {.freq = 50 * MEGA /* Hz */}, .ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC, .tx_start_of_frame = 0xAFAF, .rx_start_of_frame = 0xFAFA, diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 24627ab14626..2ed2370a3166 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -849,13 +849,6 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, break; } - /* driver/net/can/dev.c:can_restart() takes in account error - * messages in the RX stats. Doing the same here for - * consistency. - */ - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += CAN_ERR_DLC; - if (cf) { if (cf->data[1]) cf->can_id |= CAN_ERR_CRTL; @@ -2094,6 +2087,7 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx) netdev->netdev_ops = &es58x_netdev_ops; netdev->flags |= IFF_ECHO; /* We support local echo */ + netdev->dev_port = channel_idx; ret = register_candev(netdev); if (ret) diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c index 4f0cae29f4d8..ec87126e1a7d 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_fd.c +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c @@ -12,6 +12,7 @@ */ #include <linux/kernel.h> +#include <linux/units.h> #include <asm/unaligned.h> #include "es58x_core.h" @@ -522,8 +523,8 @@ const struct es58x_parameters es58x_fd_param = { * Mbps work in an optimal environment but are not recommended * for production environment. 
*/ - .bitrate_max = 8 * CAN_MBPS, - .clock = {.freq = 80 * CAN_MHZ}, + .bitrate_max = 8 * MEGA /* BPS */, + .clock = {.freq = 80 * MEGA /* Hz */}, .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO, diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 1b400de00f51..b487e3fe770a 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -321,7 +321,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* device reports out of range channel id */ if (hf->channel >= GS_MAX_INTF) - goto resubmit_urb; + goto device_detach; dev = usbcan->canch[hf->channel]; @@ -357,9 +357,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) goto resubmit_urb; } - netdev->stats.tx_packets++; - netdev->stats.tx_bytes += hf->can_dlc; - txc = gs_get_tx_context(dev, hf->echo_id); /* bad devices send bad echo_ids. */ @@ -370,7 +367,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) goto resubmit_urb; } - can_get_echo_skb(netdev, hf->echo_id, NULL); + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id, + NULL); gs_free_tx_context(txc); @@ -406,6 +405,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* USB failure take down all interfaces */ if (rc == -ENODEV) { + device_detach: for (rc = 0; rc < GS_MAX_INTF; rc++) { if (usbcan->canch[rc]) netif_device_detach(usbcan->canch[rc]->netdev); @@ -507,6 +507,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, hf->echo_id = idx; hf->channel = dev->channel; + hf->flags = 0; + hf->reserved = 0; cf = (struct can_frame *)skb->data; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 390b6bde883c..3a49257f9fa6 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -77,7 +77,6 @@ struct kvaser_usb_dev_card_data { struct kvaser_usb_tx_urb_context { struct kvaser_usb_net_priv *priv; u32 echo_index; - int dlc; }; struct kvaser_usb { @@ -162,8 +161,8 @@ struct kvaser_usb_dev_ops { void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf, int len); void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid); + const struct sk_buff *skb, int *cmd_len, + u16 transid); }; struct kvaser_usb_dev_cfg { diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 0cc0fc866a2a..c4b4d3d0a387 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -279,8 +279,6 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); return 0; @@ -567,7 +565,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, goto freeurb; } - buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len, + buf = dev->ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index); if (!buf) { stats->tx_dropped++; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index dcee8dc828ec..a26823c5b62a 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c 
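A pattern repeats across the USB CAN hunks here (ems_usb, esd_usb2, gs_usb above, kvaser_usb below): the transmit path no longer caches the frame length in its URB context; instead tx_bytes is taken from the value can_get_echo_skb() returns when the echoed skb is released on TX completion. A minimal sketch of the resulting completion handler, for a hypothetical driver (the demo_* name is illustrative, not from this series):

/* Sketch only: tx statistics accounted at echo-skb release time.
 * can_get_echo_skb() returns the data length of the echoed frame,
 * so nothing needs to be cached in the tx context at xmit time.
 */
static void demo_usb_tx_done(struct net_device *netdev, unsigned int echo_index)
{
	struct net_device_stats *stats = &netdev->stats;

	stats->tx_packets++;
	stats->tx_bytes += can_get_echo_skb(netdev, echo_index, NULL);
}

A side effect worth noting: in this series can_get_echo_skb() returns 0 for RTR frames, which keeps tx_bytes consistent with the RTR handling on the receive side.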
@@ -22,6 +22,7 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/units.h> #include <linux/usb.h> #include <linux/can.h> @@ -295,6 +296,7 @@ struct kvaser_cmd { #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) +#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6) /* CAN frame flags. Used in ext_rx_can and ext_tx_can */ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) @@ -869,7 +871,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, struct net_device *netdev = priv->netdev; struct can_frame *cf; struct sk_buff *skb; - struct net_device_stats *stats; enum can_state new_state, old_state; old_state = priv->can.state; @@ -919,9 +920,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, cf->data[6] = bec->txerr; cf->data[7] = bec->rxerr; - stats = &netdev->stats; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1074,8 +1072,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); priv->bec.txerr = bec.txerr; @@ -1109,8 +1105,6 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv, } stats->tx_errors++; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1120,7 +1114,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_tx_urb_context *context; struct kvaser_usb_net_priv *priv; unsigned long irq_flags; + unsigned int len; bool one_shot_fail = false; + bool is_err_frame = false; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); @@ -1139,24 +1135,28 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); one_shot_fail = true; } + + is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK && + flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; } context = &priv->tx_contexts[transid % dev->max_tx_urbs]; - if (!one_shot_fail) { - struct net_device_stats *stats = &priv->netdev->stats; - - stats->tx_packets++; - stats->tx_bytes += can_fd_dlc2len(context->dlc); - } spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags); - can_get_echo_skb(priv->netdev, context->echo_index, NULL); + len = can_get_echo_skb(priv->netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(priv->netdev); spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags); + + if (!one_shot_fail && !is_err_frame) { + struct net_device_stats *stats = &priv->netdev->stats; + + stats->tx_packets++; + stats->tx_bytes += len; + } } static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, @@ -1208,13 +1208,15 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, cf->len = can_cc_dlc2len(cmd->rx_can.dlc); - if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { cf->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cf->data, cmd->rx_can.data, cf->len); + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_rx(skb); } @@ -1286,13 +1288,15 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, cf->len = can_cc_dlc2len(dlc); } - if (flags & 
KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { cf->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len); + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_rx(skb); } @@ -1371,8 +1375,8 @@ static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev, static void * kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd_ext *cmd; @@ -1384,8 +1388,6 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, u32 kcan_id; u32 kcan_header; - *frame_len = nbr_of_bytes; - cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC); if (!cmd) return NULL; @@ -1451,8 +1453,8 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, static void * kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; @@ -1460,8 +1462,6 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, u32 flags; u32 id; - *frame_len = cf->len; - cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); if (!cmd) return NULL; @@ -1495,7 +1495,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, cmd->tx_can.id = cpu_to_le32(id); cmd->tx_can.flags = flags; - memcpy(cmd->tx_can.data, cf->data, *frame_len); + memcpy(cmd->tx_can.data, cf->data, cf->len); return cmd; } @@ -2003,17 +2003,17 @@ static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev, static void * kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + u16 transid) { void *buf; if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD) - buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len, - cmd_len, transid); + buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, cmd_len, + transid); else - buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len, - cmd_len, transid); + buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, cmd_len, + transid); return buf; } @@ -2040,7 +2040,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { .clock = { - .freq = 80000000, + .freq = 80 * MEGA /* Hz */, }, .timestamp_freq = 80, .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, @@ -2049,7 +2049,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { .clock = { - .freq = 24000000, + .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c, @@ -2057,7 +2057,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = { .clock = { - .freq = 80000000, + .freq = 80 * MEGA /* Hz */, }, .timestamp_freq = 24, .bittiming_const = &kvaser_usb_hydra_rt_bittiming_c, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 
59ba7c7beec0..c805b999c543 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -19,6 +19,7 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/units.h> #include <linux/usb.h> #include <linux/can.h> @@ -28,10 +29,6 @@ #include "kvaser_usb.h" -/* Forward declaration */ -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; - -#define CAN_USB_CLOCK 8000000 #define MAX_USBCAN_NET_DEVICES 2 /* Command header size */ @@ -80,6 +77,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; #define CMD_LEAF_LOG_MESSAGE 106 +/* Leaf frequency options */ +#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 +#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 +#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) +#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) + /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -340,18 +343,60 @@ struct kvaser_usb_err_summary { }; }; +static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { + .name = "kvaser_usb", + .tseg1_min = KVASER_USB_TSEG1_MIN, + .tseg1_max = KVASER_USB_TSEG1_MAX, + .tseg2_min = KVASER_USB_TSEG2_MIN, + .tseg2_max = KVASER_USB_TSEG2_MAX, + .sjw_max = KVASER_USB_SJW_MAX, + .brp_min = KVASER_USB_BRP_MIN, + .brp_max = KVASER_USB_BRP_MAX, + .brp_inc = KVASER_USB_BRP_INC, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = { + .clock = { + .freq = 8 * MEGA /* Hz */, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = { + .clock = { + .freq = 16 * MEGA /* Hz */, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = { + .clock = { + .freq = 24 * MEGA /* Hz */, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = { + .clock = { + .freq = 32 * MEGA /* Hz */, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; + static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, - const struct sk_buff *skb, int *frame_len, - int *cmd_len, u16 transid) + const struct sk_buff *skb, int *cmd_len, + u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; u8 *cmd_tx_can_flags = NULL; /* GCC */ struct can_frame *cf = (struct can_frame *)skb->data; - *frame_len = cf->len; - cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); if (cmd) { cmd->u.tx_can.tid = transid & 0xff; @@ -471,6 +516,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, return rc; } +static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, + const struct leaf_cmd_softinfo *softinfo) +{ + u32 sw_options = le32_to_cpu(softinfo->sw_options); + + dev->fw_version = le32_to_cpu(softinfo->fw_version); + dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); + + switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { + case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz; + break; + case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: + dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz; + break; + } +} + static int 
kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) { struct kvaser_cmd cmd; @@ -486,14 +552,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) switch (dev->card_data.leaf.family) { case KVASER_LEAF: - dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); + kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; case KVASER_USBCAN: dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); + dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz; break; } @@ -575,8 +640,6 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, if (skb) { cf->can_id |= CAN_ERR_RESTARTED; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } else { netdev_err(priv->netdev, @@ -589,12 +652,11 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, priv->can.state = CAN_STATE_ERROR_ACTIVE; } - stats->tx_packets++; - stats->tx_bytes += context->dlc; - spin_lock_irqsave(&priv->tx_contexts_lock, flags); - can_get_echo_skb(priv->netdev, context->echo_index, NULL); + stats->tx_packets++; + stats->tx_bytes += can_get_echo_skb(priv->netdev, + context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(priv->netdev); @@ -777,8 +839,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, cf->data[6] = es->txerr; cf->data[7] = es->rxerr; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1005,7 +1065,8 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, } stats->rx_packets++; - stats->rx_bytes += cf->len; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1225,24 +1286,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; - dev->cfg = &kvaser_usb_leaf_dev_cfg; card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; return 0; } -static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, -}; - static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); @@ -1348,11 +1396,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, }; - -static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = { - .clock = { - .freq = CAN_USB_CLOCK, - }, - .timestamp_freq = 1, - .bittiming_const = &kvaser_usb_leaf_bittiming_const, -}; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index a1a154c08b7f..77bddff86252 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -64,7 +64,6 @@ struct mcba_usb_ctx { struct mcba_priv *priv; u32 ndx; - u8 dlc; bool can; }; @@ -184,13 +183,10 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv, ctx = &priv->tx_context[i]; ctx->ndx = i; - if (cf) { + if (cf) ctx->can = true; - ctx->dlc = cf->len; - } else { + else ctx->can = false; - 
ctx->dlc = 0; - } atomic_dec(&priv->free_ctx_cnt); break; @@ -236,10 +232,10 @@ static void mcba_usb_write_bulk_callback(struct urb *urb) return; netdev->stats.tx_packets++; - netdev->stats.tx_bytes += ctx->dlc; + netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx, + NULL); can_led_event(netdev, CAN_LED_EVENT_TX); - can_get_echo_skb(netdev, ctx->ndx, NULL); } if (urb->status) @@ -450,15 +446,16 @@ static void mcba_usb_process_can(struct mcba_priv *priv, cf->can_id = (sid & 0xffe0) >> 5; } - if (msg->dlc & MCBA_DLC_RTR_MASK) - cf->can_id |= CAN_RTR_FLAG; - cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK); - memcpy(cf->data, msg->data, cf->len); + if (msg->dlc & MCBA_DLC_RTR_MASK) { + cf->can_id |= CAN_RTR_FLAG; + } else { + memcpy(cf->data, msg->data, cf->len); + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; can_led_event(priv->netdev, CAN_LED_EVENT_RX); netif_rx(skb); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 876218752766..17dc178f555b 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -8,6 +8,7 @@ * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> */ +#include <asm/unaligned.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/module.h> @@ -520,8 +521,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, &hwts->hwtstamp); } - mc->netdev->stats.rx_packets++; - mc->netdev->stats.rx_bytes += cf->len; netif_rx(skb); return 0; @@ -678,15 +677,16 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) /* Ignore next byte (client private id) if SRR bit is set */ if (can_id_flags & PCAN_USB_TX_SRR) mc->ptr++; + + /* update statistics */ + mc->netdev->stats.rx_bytes += cf->len; } + mc->netdev->stats.rx_packets++; /* convert timestamp into kernel time */ hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp); - /* update statistics */ - mc->netdev->stats.rx_packets++; - mc->netdev->stats.rx_bytes += cf->len; /* push the skb */ netif_rx(skb); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 6107fef9f4a0..b850ff8fe4bd 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -291,6 +291,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb) struct peak_tx_urb_context *context = urb->context; struct peak_usb_device *dev; struct net_device *netdev; + int tx_bytes; BUG_ON(!context); @@ -305,10 +306,6 @@ static void peak_usb_write_bulk_callback(struct urb *urb) /* check tx status */ switch (urb->status) { case 0: - /* transmission complete */ - netdev->stats.tx_packets++; - netdev->stats.tx_bytes += context->data_len; - /* prevent tx timeout */ netif_trans_update(netdev); break; @@ -327,12 +324,17 @@ static void peak_usb_write_bulk_callback(struct urb *urb) } /* should always release echo skb and corresponding context */ - can_get_echo_skb(netdev, context->echo_index, NULL); + tx_bytes = can_get_echo_skb(netdev, context->echo_index, NULL); context->echo_index = PCAN_USB_MAX_TX_URBS; - /* do wakeup tx queue in case of success only */ - if (!urb->status) + if (!urb->status) { + /* transmission complete */ + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += tx_bytes; + + /* do wakeup tx queue in case of success only */ netif_wake_queue(netdev); + } } /* @@ -344,7 +346,6 @@ static netdev_tx_t 
peak_usb_ndo_start_xmit(struct sk_buff *skb, struct peak_usb_device *dev = netdev_priv(netdev); struct peak_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct urb *urb; u8 *obuf; int i, err; @@ -378,9 +379,6 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, context->echo_index = i; - /* Note: this works with CANFD frames too */ - context->data_len = cfd->len; - usb_anchor_urb(urb, &dev->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index, 0); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index daa19f57e742..f60af573a2e0 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -99,7 +99,6 @@ struct peak_time_ref { struct peak_tx_urb_context { struct peak_usb_device *dev; u32 echo_index; - u8 data_len; struct urb *urb; }; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 6bd12549f101..65487ec33566 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -507,13 +507,13 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, if (rx_msg_flags & PUCAN_MSG_EXT_ID) cfd->can_id |= CAN_EFF_FLAG; - if (rx_msg_flags & PUCAN_MSG_RTR) + if (rx_msg_flags & PUCAN_MSG_RTR) { cfd->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cfd->data, rm->d, cfd->len); - + netdev->stats.rx_bytes += cfd->len; + } netdev->stats.rx_packets++; - netdev->stats.rx_bytes += cfd->len; peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high)); @@ -577,9 +577,6 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, if (!skb) return -ENOMEM; - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += cf->len; - peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high)); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 858ab22708fc..ebe087f258e3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -536,17 +536,19 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, if (rx->flags & PCAN_USBPRO_EXT) can_frame->can_id |= CAN_EFF_FLAG; - if (rx->flags & PCAN_USBPRO_RTR) + if (rx->flags & PCAN_USBPRO_RTR) { can_frame->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(can_frame->data, rx->data, can_frame->len); + netdev->stats.rx_bytes += can_frame->len; + } + netdev->stats.rx_packets++; + hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32), &hwts->hwtstamp); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += can_frame->len; netif_rx(skb); return 0; @@ -660,8 +662,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += can_frame->len; netif_rx(skb); return 0; diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 1679cbe45ded..c7c41d1fd038 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -259,7 +259,6 @@ struct ucan_priv; /* Context Information for transmission URBs */ struct ucan_urb_context { struct ucan_priv *up; - u8 dlc; bool allocated; }; @@ -621,8 +620,11 @@ static void ucan_rx_can_msg(struct ucan_priv 
*up, struct ucan_message_in *m) memcpy(cf->data, m->msg.can_msg.data, cf->len); /* don't count error frames as real packets */ - stats->rx_packets++; - stats->rx_bytes += cf->len; + if (!(cf->can_id & CAN_ERR_FLAG)) { + stats->rx_packets++; + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; + } /* pass it to Linux */ netif_rx(skb); @@ -634,7 +636,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up, { unsigned long flags; u16 count, i; - u8 echo_index, dlc; + u8 echo_index; u16 len = le16_to_cpu(m->len); struct ucan_urb_context *context; @@ -658,7 +660,6 @@ static void ucan_tx_complete_msg(struct ucan_priv *up, /* gather information from the context */ context = &up->context_array[echo_index]; - dlc = READ_ONCE(context->dlc); /* Release context and restart queue if necessary. * Also check if the context was allocated @@ -671,8 +672,8 @@ static void ucan_tx_complete_msg(struct ucan_priv *up, UCAN_TX_COMPLETE_SUCCESS) { /* update statistics */ up->netdev->stats.tx_packets++; - up->netdev->stats.tx_bytes += dlc; - can_get_echo_skb(up->netdev, echo_index, NULL); + up->netdev->stats.tx_bytes += + can_get_echo_skb(up->netdev, echo_index, NULL); } else { up->netdev->stats.tx_dropped++; can_free_echo_skb(up->netdev, echo_index, NULL); @@ -1086,8 +1087,6 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up, } m->len = cpu_to_le16(mlen); - context->dlc = cf->len; - m->subtype = echo_index; /* build the urb */ diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index d1b83bd1b3cb..431af1ec1e3c 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -114,15 +114,12 @@ struct usb_8dev_tx_urb_context { struct usb_8dev_priv *priv; u32 echo_index; - u8 dlc; }; /* Structure to hold all of our device specific stuff */ struct usb_8dev_priv { struct can_priv can; /* must be the first member */ - struct sk_buff *echo_skb[MAX_TX_URBS]; - struct usb_device *udev; struct net_device *netdev; @@ -449,8 +446,6 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; - stats->rx_packets++; - stats->rx_bytes += cf->len; netif_rx(skb); } @@ -476,13 +471,14 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, if (msg->flags & USB_8DEV_EXTID) cf->can_id |= CAN_EFF_FLAG; - if (msg->flags & USB_8DEV_RTR) + if (msg->flags & USB_8DEV_RTR) { cf->can_id |= CAN_RTR_FLAG; - else + } else { memcpy(cf->data, msg->data, cf->len); - + stats->rx_bytes += cf->len; + } stats->rx_packets++; - stats->rx_bytes += cf->len; + netif_rx(skb); can_led_event(priv->netdev, CAN_LED_EVENT_RX); @@ -584,9 +580,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb) urb->status); netdev->stats.tx_packets++; - netdev->stats.tx_bytes += context->dlc; - - can_get_echo_skb(netdev, context->echo_index, NULL); + netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); can_led_event(netdev, CAN_LED_EVENT_TX); @@ -657,7 +651,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, context->priv = priv; context->echo_index = i; - context->dlc = cf->len; usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX), diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 067705e2850b..c42f18845b02 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -87,13 +87,14 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) { struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct net_device_stats *stats 
= &dev->stats; - int loop; + int loop, len; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; + len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len; stats->tx_packets++; - stats->tx_bytes += cfd->len; + stats->tx_bytes += len; /* set flag whether this packet has to be looped back */ loop = skb->pkt_type == PACKET_LOOPBACK; @@ -105,7 +106,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) * CAN core already did the echo for us */ stats->rx_packets++; - stats->rx_bytes += cfd->len; + stats->rx_bytes += len; } consume_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 8861a7d875e7..47ccc15a3486 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -62,7 +62,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) skb->dev = peer; skb->ip_summed = CHECKSUM_UNNECESSARY; - len = cfd->len; + len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len; if (netif_rx_ni(skb) == NET_RX_SUCCESS) { srcstats->tx_packets++; srcstats->tx_bytes += len; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index e2b15d29d15e..1674b561c9a2 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -787,10 +787,11 @@ static int xcan_rx(struct net_device *ndev, int frame_base) *(__be32 *)(cf->data) = cpu_to_be32(data[0]); if (cf->len > 4) *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]); - } - stats->rx_bytes += cf->len; + stats->rx_bytes += cf->len; + } stats->rx_packets++; + netif_receive_skb(skb); return 1; @@ -871,8 +872,11 @@ static int xcanfd_rx(struct net_device *ndev, int frame_base) *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]); } } - stats->rx_bytes += cf->len; + + if (!(cf->can_id & CAN_RTR_FLAG)) + stats->rx_bytes += cf->len; stats->rx_packets++; + netif_receive_skb(skb); return 1; @@ -965,13 +969,8 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev) xcan_set_error_state(ndev, new_state, skb ? 
cf : NULL); - if (skb) { - struct net_device_stats *stats = &ndev->stats; - - stats->rx_packets++; - stats->rx_bytes += cf->len; + if (skb) netif_rx(skb); - } } } @@ -1095,8 +1094,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) if (skb) { skb_cf->can_id |= cf.can_id; memcpy(skb_cf->data, cf.data, CAN_ERR_DLC); - stats->rx_packets++; - stats->rx_bytes += CAN_ERR_DLC; netif_rx(skb); } } @@ -1761,7 +1758,12 @@ static int xcan_probe(struct platform_device *pdev) spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ - ndev->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_free; + + ndev->irq = ret; + ndev->flags |= IFF_ECHO; /* We support local echo */ platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index af4761968733..3867f3d4545f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1860,7 +1860,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_mdb_del); -int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) +int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct b53_device *dev = ds->priv; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; @@ -1887,7 +1888,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); b53_for_each_port(dev, i) { - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Add this local port to the remote port VLAN control @@ -1911,7 +1912,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) } EXPORT_SYMBOL(b53_br_join); -void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) +void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { struct b53_device *dev = ds->priv; struct b53_vlan *vl = &dev->vlans[0]; @@ -1923,7 +1924,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_for_each_port(dev, i) { /* Don't touch the remaining ports */ - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 579da74ada64..b41dc8ac2ca8 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -324,8 +324,9 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_get_sset_count(struct dsa_switch *ds, int port, int sset); void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data); -int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); -void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); +int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, + bool *tx_fwd_offload); +void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); int b53_br_flags_pre(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 01e37b75471e..2b88f03e5252 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++
b/drivers/net/dsa/b53/b53_spi.c @@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = { }; MODULE_DEVICE_TABLE(of, b53_spi_of_match); +static const struct spi_device_id b53_spi_ids[] = { + { .name = "bcm5325" }, + { .name = "bcm5365" }, + { .name = "bcm5395" }, + { .name = "bcm5397" }, + { .name = "bcm5398" }, + { .name = "bcm53115" }, + { .name = "bcm53125" }, + { .name = "bcm53128" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, b53_spi_ids); + static struct spi_driver b53_spi_driver = { .driver = { .name = "b53-switch", @@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = { .probe = b53_spi_probe, .remove = b53_spi_remove, .shutdown = b53_spi_shutdown, + .id_table = b53_spi_ids, }; module_spi_driver(b53_spi_driver); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 13aa43b5cffd..33499fcd8848 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -62,6 +62,38 @@ static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port) return REG_SWITCH_STATUS; } +static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port) +{ + switch (port) { + case 0: + return REG_LED_0_CNTRL; + case 1: + return REG_LED_1_CNTRL; + case 2: + return REG_LED_2_CNTRL; + } + + switch (priv->type) { + case BCM4908_DEVICE_ID: + switch (port) { + case 3: + return REG_LED_3_CNTRL; + case 7: + return REG_LED_4_CNTRL; + default: + break; + } + break; + default: + break; + } + + WARN_ONCE(1, "Unsupported port %d\n", port); + + /* RO fallback reg */ + return REG_SWITCH_STATUS; +} + /* Return the number of active ports, not counting the IMP (CPU) port */ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds) { @@ -187,9 +219,14 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) /* Use PHY-driven LED signaling */ if (!enable) { - reg = reg_readl(priv, REG_LED_CNTRL(0)); - reg |= SPDLNK_SRC_SEL; - reg_writel(priv, reg, REG_LED_CNTRL(0)); + u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0); + + if (priv->type == BCM7278_DEVICE_ID || + priv->type == BCM7445_DEVICE_ID) { + reg = reg_led_readl(priv, led_ctrl, 0); + reg |= LED_CNTRL_SPDLNK_SRC_SEL; + reg_led_writel(priv, reg, led_ctrl, 0); + } } } @@ -1232,9 +1269,14 @@ static const u16 bcm_sf2_4908_reg_offsets[] = { [REG_SPHY_CNTRL] = 0x24, [REG_CROSSBAR] = 0xc8, [REG_RGMII_11_CNTRL] = 0x014c, - [REG_LED_0_CNTRL] = 0x40, - [REG_LED_1_CNTRL] = 0x4c, - [REG_LED_2_CNTRL] = 0x58, + [REG_LED_0_CNTRL] = 0x40, + [REG_LED_1_CNTRL] = 0x4c, + [REG_LED_2_CNTRL] = 0x58, + [REG_LED_3_CNTRL] = 0x64, + [REG_LED_4_CNTRL] = 0x88, + [REG_LED_5_CNTRL] = 0xa0, + [REG_LED_AGGREGATE_CTRL] = 0xb8, + }; static const struct bcm_sf2_of_data bcm_sf2_4908_data = { diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 0d48402068d3..00afc94ce522 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -210,6 +210,16 @@ SF2_IO_MACRO(acb); SWITCH_INTR_L2(0); SWITCH_INTR_L2(1); +static inline u32 reg_led_readl(struct bcm_sf2_priv *priv, u16 off, u16 reg) +{ + return readl_relaxed(priv->reg + priv->reg_offsets[off] + reg); +} + +static inline void reg_led_writel(struct bcm_sf2_priv *priv, u32 val, u16 off, u16 reg) +{ + writel_relaxed(val, priv->reg + priv->reg_offsets[off] + reg); +} + /* RXNFC */ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc, u32 *rule_locs); diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 7bffc80f241f..da0dedbd6555 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ 
b/drivers/net/dsa/bcm_sf2_regs.h @@ -25,6 +25,10 @@ enum bcm_sf2_reg_offs { REG_LED_0_CNTRL, REG_LED_1_CNTRL, REG_LED_2_CNTRL, + REG_LED_3_CNTRL, + REG_LED_4_CNTRL, + REG_LED_5_CNTRL, + REG_LED_AGGREGATE_CTRL, REG_SWITCH_REG_MAX, }; @@ -56,6 +60,63 @@ enum bcm_sf2_reg_offs { #define CROSSBAR_BCM4908_EXT_GPHY4 1 #define CROSSBAR_BCM4908_EXT_RGMII 2 +/* Relative to REG_LED_*_CNTRL (BCM7278, BCM7445) */ +#define LED_CNTRL_NO_LINK_ENCODE_SHIFT 0 +#define LED_CNTRL_M10_ENCODE_SHIFT 2 +#define LED_CNTRL_M100_ENCODE_SHIFT 4 +#define LED_CNTRL_M1000_ENCODE_SHIFT 6 +#define LED_CNTRL_SEL_NO_LINK_ENCODE_SHIFT 8 +#define LED_CNTRL_SEL_10M_ENCODE_SHIFT 10 +#define LED_CNTRL_SEL_100M_ENCODE_SHIFT 12 +#define LED_CNTRL_SEL_1000M_ENCODE_SHIFT 14 +#define LED_CNTRL_RX_DV_EN (1 << 16) +#define LED_CNTRL_TX_EN_EN (1 << 17) +#define LED_CNTRL_SPDLNK_LED0_ACT_SEL_SHIFT 18 +#define LED_CNTRL_SPDLNK_LED1_ACT_SEL_SHIFT 20 +#define LED_CNTRL_ACT_LED_ACT_SEL_SHIFT 22 +#define LED_CNTRL_SPDLNK_SRC_SEL (1 << 24) +#define LED_CNTRL_SPDLNK_LED0_ACT_POL_SEL (1 << 25) +#define LED_CNTRL_SPDLNK_LED1_ACT_POL_SEL (1 << 26) +#define LED_CNTRL_ACT_LED_POL_SEL (1 << 27) +#define LED_CNTRL_MASK 0x3 + +/* Register relative to REG_LED_*_CNTRL (BCM4908) */ +#define REG_LED_CTRL 0x0 +#define LED_CTRL_RX_ACT_EN 0x00000001 +#define LED_CTRL_TX_ACT_EN 0x00000002 +#define LED_CTRL_SPDLNK_LED0_ACT_SEL 0x00000004 +#define LED_CTRL_SPDLNK_LED1_ACT_SEL 0x00000008 +#define LED_CTRL_SPDLNK_LED2_ACT_SEL 0x00000010 +#define LED_CTRL_ACT_LED_ACT_SEL 0x00000020 +#define LED_CTRL_SPDLNK_LED0_ACT_POL_SEL 0x00000040 +#define LED_CTRL_SPDLNK_LED1_ACT_POL_SEL 0x00000080 +#define LED_CTRL_SPDLNK_LED2_ACT_POL_SEL 0x00000100 +#define LED_CTRL_ACT_LED_POL_SEL 0x00000200 +#define LED_CTRL_LED_SPD_OVRD 0x00001c00 +#define LED_CTRL_LNK_STATUS_OVRD 0x00002000 +#define LED_CTRL_SPD_OVRD_EN 0x00004000 +#define LED_CTRL_LNK_OVRD_EN 0x00008000 + +/* Register relative to REG_LED_*_CNTRL (BCM4908) */ +#define REG_LED_LINK_SPEED_ENC_SEL 0x4 +#define LED_LINK_SPEED_ENC_SEL_NO_LINK_SHIFT 0 +#define LED_LINK_SPEED_ENC_SEL_10M_SHIFT 3 +#define LED_LINK_SPEED_ENC_SEL_100M_SHIFT 6 +#define LED_LINK_SPEED_ENC_SEL_1000M_SHIFT 9 +#define LED_LINK_SPEED_ENC_SEL_2500M_SHIFT 12 +#define LED_LINK_SPEED_ENC_SEL_10G_SHIFT 15 +#define LED_LINK_SPEED_ENC_SEL_MASK 0x7 + +/* Register relative to REG_LED_*_CNTRL (BCM4908) */ +#define REG_LED_LINK_SPEED_ENC 0x8 +#define LED_LINK_SPEED_ENC_NO_LINK_SHIFT 0 +#define LED_LINK_SPEED_ENC_M10_SHIFT 3 +#define LED_LINK_SPEED_ENC_M100_SHIFT 6 +#define LED_LINK_SPEED_ENC_M1000_SHIFT 9 +#define LED_LINK_SPEED_ENC_M2500_SHIFT 12 +#define LED_LINK_SPEED_ENC_M10G_SHIFT 15 +#define LED_LINK_SPEED_ENC_MASK 0x7 + /* Relative to REG_RGMII_CNTRL */ #define RGMII_MODE_EN (1 << 0) #define ID_MODE_DIS (1 << 1) @@ -73,10 +134,6 @@ enum bcm_sf2_reg_offs { #define LPI_COUNT_SHIFT 9 #define LPI_COUNT_MASK 0x3F -#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x)) - -#define SPDLNK_SRC_SEL (1 << 24) - /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ #define INTRL2_CPU_STATUS 0x00 #define INTRL2_CPU_SET 0x04 diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index e638e3eea911..33daaf10c488 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -167,19 +167,20 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port, } static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { dev_dbg(ds->dev, "%s: port: %d, bridge: 
%s\n", - __func__, port, bridge->name); + __func__, port, bridge.dev->name); return 0; } static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", - __func__, port, bridge->name); + __func__, port, bridge.dev->name); } static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 4e0b53d94b52..726f267cb228 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -674,7 +674,8 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port, } static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct hellcreek *hellcreek = ds->priv; @@ -691,7 +692,7 @@ static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, } static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct hellcreek *hellcreek = ds->priv; @@ -710,8 +711,9 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek, u16 meta = 0; dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, " - "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask, - entry->is_obt, entry->reprio_en, entry->reprio_tc); + "OBT=%d, PASS_BLOCKED=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, + entry->portmask, entry->is_obt, entry->pass_blocked, + entry->reprio_en, entry->reprio_tc); /* Add mac address */ hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH); @@ -722,6 +724,8 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek, meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT; if (entry->is_obt) meta |= HR_FDBWRM0_OBT; + if (entry->pass_blocked) + meta |= HR_FDBWRM0_PASS_BLOCKED; if (entry->reprio_en) { meta |= HR_FDBWRM0_REPRIO_EN; meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT; @@ -1049,7 +1053,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek) static int hellcreek_setup_fdb(struct hellcreek *hellcreek) { - static struct hellcreek_fdb_entry ptp = { + static struct hellcreek_fdb_entry l2_ptp = { /* MAC: 01-1B-19-00-00-00 */ .mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 }, .portmask = 0x03, /* Management ports */ @@ -1060,24 +1064,94 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek) .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */ .reprio_en = 1, }; - static struct hellcreek_fdb_entry p2p = { + static struct hellcreek_fdb_entry udp4_ptp = { + /* MAC: 01-00-5E-00-01-81 */ + .mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 0, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry udp6_ptp = { + /* MAC: 33-33-00-00-01-81 */ + .mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 0, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry l2_p2p = { /* MAC: 01-80-C2-00-00-0E */ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }, .portmask = 0x03, /* Management ports */ .age = 0, .is_obt = 0, - .pass_blocked = 0, + .pass_blocked = 1, .is_static = 1, .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */ .reprio_en = 1, }; + static struct hellcreek_fdb_entry udp4_p2p = { + 
/* MAC: 01-00-5E-00-00-6B */ + .mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 1, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry udp6_p2p = { + /* MAC: 33-33-00-00-00-6B */ + .mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 1, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry stp = { + /* MAC: 01-80-C2-00-00-00 */ + .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 1, + .is_static = 1, + .reprio_tc = 6, + .reprio_en = 1, + }; int ret; mutex_lock(&hellcreek->reg_lock); - ret = __hellcreek_fdb_add(hellcreek, &ptp); + ret = __hellcreek_fdb_add(hellcreek, &l2_ptp); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &udp4_ptp); if (ret) goto out; - ret = __hellcreek_fdb_add(hellcreek, &p2p); + ret = __hellcreek_fdb_add(hellcreek, &udp6_ptp); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &l2_p2p); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &udp4_p2p); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &udp6_p2p); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &stp); out: mutex_unlock(&hellcreek->reg_lock); @@ -1384,14 +1458,19 @@ static void hellcreek_teardown(struct dsa_switch *ds) dsa_devlink_resources_unregister(ds); } -static void hellcreek_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct hellcreek *hellcreek = ds->priv; - dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port); + __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces); + + /* Include GMII - the hardware does not support this interface + * mode, but it's the default interface mode for phylib, so we + * need it for compatibility with existing DT. + */ + __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces); /* The MAC settings are a hardware configuration option and cannot be * changed at run time or by strapping. Therefore the attached PHYs @@ -1399,12 +1478,9 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port, * by the hardware. 
*/ if (hellcreek->pdata->is_100_mbits) - phylink_set(mask, 100baseT_Full); + config->mac_capabilities = MAC_100FD; else - phylink_set(mask, 1000baseT_Full); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); + config->mac_capabilities = MAC_1000FD; } static int @@ -1755,7 +1831,7 @@ static const struct dsa_switch_ops hellcreek_ds_ops = { .get_strings = hellcreek_get_strings, .get_tag_protocol = hellcreek_get_tag_protocol, .get_ts_info = hellcreek_get_ts_info, - .phylink_validate = hellcreek_phylink_validate, + .phylink_get_caps = hellcreek_phylink_get_caps, .port_bridge_flags = hellcreek_bridge_flags, .port_bridge_join = hellcreek_port_bridge_join, .port_bridge_leave = hellcreek_port_bridge_leave, diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c index 40b41c794dfa..b3bc948d6145 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c @@ -52,10 +52,6 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port, */ clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state); - /* Reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_ON: tx_tstamp_enable = true; diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 89f920289ae2..d55784d19fa4 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1103,12 +1103,13 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) } static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct lan9303 *chip = ds->priv; dev_dbg(chip->dev, "%s(port %d)\n", __func__, port); - if (dsa_to_port(ds, 1)->bridge_dev == dsa_to_port(ds, 2)->bridge_dev) { + if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) { lan9303_bridge_ports(chip); chip->is_bridged = true; /* unleash stp_state_set() */ } @@ -1117,7 +1118,7 @@ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, } static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct lan9303 *chip = ds->priv; diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 7056d98d8177..46ed953e787e 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -759,7 +759,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, struct netlink_ext_ack *extack) { - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; /* Do not allow changing the VLAN filtering options while in bridge */ @@ -1146,16 +1146,18 @@ static int gswip_vlan_remove(struct gswip_priv *priv, } static int gswip_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { + struct net_device *br = bridge.dev; struct gswip_priv *priv = ds->priv; int err; /* When the bridge uses VLAN filtering we have to configure VLAN * specific bridges. No bridge is configured here. 
*/ - if (!br_vlan_enabled(bridge)) { - err = gswip_vlan_add_unaware(priv, bridge, port); + if (!br_vlan_enabled(br)) { + err = gswip_vlan_add_unaware(priv, br, port); if (err) return err; priv->port_vlan_filter &= ~BIT(port); @@ -1166,8 +1168,9 @@ static int gswip_port_bridge_join(struct dsa_switch *ds, int port, } static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { + struct net_device *br = bridge.dev; struct gswip_priv *priv = ds->priv; gswip_add_single_port_br(priv, port, true); @@ -1175,16 +1178,16 @@ static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, /* When the bridge uses VLAN filtering we have to configure VLAN * specific bridges. No bridge is configured here. */ - if (!br_vlan_enabled(bridge)) - gswip_vlan_remove(priv, bridge, port, 0, true, false); + if (!br_vlan_enabled(br)) + gswip_vlan_remove(priv, br, port, 0, true, false); } static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; unsigned int max_ports = priv->hw_info->max_ports; int pos = max_ports; int i, idx = -1; @@ -1229,8 +1232,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; int err; @@ -1254,8 +1257,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port, static int gswip_port_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; /* We have to receive all packets on the CPU port and should not @@ -1340,8 +1343,8 @@ static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) static int gswip_port_fdb(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, bool add) { + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; - struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; struct gswip_pce_table_entry mac_bridge = {0,}; unsigned int cpu_port = priv->hw_info->cpu_port; int fid = -1; @@ -1438,114 +1441,70 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, return 0; } -static void gswip_phylink_set_capab(unsigned long *supported, - struct phylink_link_state *state) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - /* Allow all the expected bits */ - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - - /* With the exclusion of MII, Reverse MII and Reduced MII, we - * support Gigabit, including Half duplex - */ - if (state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_REVMII && - state->interface != PHY_INTERFACE_MODE_RMII) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 
1000baseT_Half); - } - - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -} - -static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { switch (port) { case 0: case 1: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_REVMII && - state->interface != PHY_INTERFACE_MODE_RMII) - goto unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_REVMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); break; + case 2: case 3: case 4: - if (state->interface != PHY_INTERFACE_MODE_INTERNAL) - goto unsupported; + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); break; + case 5: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_INTERNAL) - goto unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); break; - default: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; } - gswip_phylink_set_capab(supported, state); - - return; - -unsupported: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", - phy_modes(state->interface), port); + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; } -static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { switch (port) { case 0: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_GMII && - state->interface != PHY_INTERFACE_MODE_RMII) - goto unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); break; + case 1: case 2: case 3: case 4: - if (state->interface != PHY_INTERFACE_MODE_INTERNAL) - goto unsupported; + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); break; + case 5: - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_INTERNAL && - state->interface != PHY_INTERFACE_MODE_RMII) - goto unsupported; + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); break; - default: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported port: %i\n", port); - return; } - gswip_phylink_set_capab(supported, state); - - return; - -unsupported: - linkmode_zero(supported); - dev_err(ds->dev, "Unsupported interface '%s' for port %d\n", - phy_modes(state->interface), port); + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; } 
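The hellcreek and gswip hunks replace .phylink_validate with .phylink_get_caps: rather than masking link modes on every validate call, the driver declares its supported PHY interface modes and MAC capabilities once, and phylink derives the link modes from them. A minimal sketch for a hypothetical RGMII-only, gigabit full-duplex port (the demo_* name is illustrative):

static void demo_phylink_get_caps(struct dsa_switch *ds, int port,
				  struct phylink_config *config)
{
	/* declare the supported interface modes; phylink computes the
	 * ethtool linkmode masks from these capabilities
	 */
	phy_interface_set_rgmii(config->supported_interfaces);
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_1000FD;
}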
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) @@ -1827,7 +1786,7 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, - .phylink_validate = gswip_xrx200_phylink_validate, + .phylink_get_caps = gswip_xrx200_phylink_get_caps, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, .phylink_mac_link_up = gswip_phylink_mac_link_up, @@ -1851,7 +1810,7 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = { .port_fdb_add = gswip_port_fdb_add, .port_fdb_del = gswip_port_fdb_del, .port_fdb_dump = gswip_port_fdb_dump, - .phylink_validate = gswip_xrx300_phylink_validate, + .phylink_get_caps = gswip_xrx300_phylink_get_caps, .phylink_mac_config = gswip_phylink_mac_config, .phylink_mac_link_down = gswip_phylink_mac_link_down, .phylink_mac_link_up = gswip_phylink_mac_link_up, diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 43fc3087aeb3..991b9c6b6ce7 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -10,6 +10,7 @@ #include <linux/delay.h> #include <linux/export.h> #include <linux/gpio.h> +#include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_data/microchip-ksz.h> @@ -1002,57 +1003,32 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) data &= ~PORT_VLAN_MEMBERSHIP; data |= (member & dev->port_mask); ksz_pwrite8(dev, port, P_MIRROR_CTRL, data); - dev->ports[port].member = member; } static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { struct ksz_device *dev = ds->priv; - int forward = dev->member; struct ksz_port *p; - int member = -1; u8 data; - p = &dev->ports[port]; - ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); switch (state) { case BR_STATE_DISABLED: data |= PORT_LEARN_DISABLE; - if (port < dev->phy_port_cnt) - member = 0; break; case BR_STATE_LISTENING: data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - if (port < dev->phy_port_cnt && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; case BR_STATE_LEARNING: data |= PORT_RX_ENABLE; break; case BR_STATE_FORWARDING: data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - - /* This function is also used internally. */ - if (port == dev->cpu_port) - break; - - /* Port is a member of a bridge. */ - if (dev->br_member & BIT(port)) { - dev->member |= BIT(port); - member = dev->member; - } else { - member = dev->host_mask | p->vid_member; - } break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; - if (port < dev->phy_port_cnt && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -1060,22 +1036,11 @@ static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) } ksz_pwrite8(dev, port, P_STP_CTRL, data); + + p = &dev->ports[port]; p->stp_state = state; - /* Port membership may share register with STP state. */ - if (member >= 0 && member != p->member) - ksz8_cfg_port_member(dev, port, (u8)member); - - /* Check if forwarding needs to be updated. 
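With the membership bookkeeping gone, ksz8_port_stp_state_set reduces to the pure STP-state-to-control-bits mapping in the switch statement above. A standalone sketch of that mapping (bit positions are illustrative only, not the chip's actual P_STP_CTRL layout):

#include <stdio.h>

#define PORT_TX_ENABLE     (1u << 2) /* illustrative bit values */
#define PORT_RX_ENABLE     (1u << 1)
#define PORT_LEARN_DISABLE (1u << 0)

enum stp { ST_DISABLED, ST_LISTENING, ST_LEARNING, ST_FORWARDING, ST_BLOCKING };

/* Same mapping as above: only forwarding enables TX, and learning is
 * blocked in the disabled/listening/blocking states. */
static unsigned int stp_ctrl_bits(enum stp state)
{
	switch (state) {
	case ST_LISTENING:  return PORT_RX_ENABLE | PORT_LEARN_DISABLE;
	case ST_LEARNING:   return PORT_RX_ENABLE;
	case ST_FORWARDING: return PORT_TX_ENABLE | PORT_RX_ENABLE;
	case ST_DISABLED:
	case ST_BLOCKING:
	default:            return PORT_LEARN_DISABLE;
	}
}

int main(void)
{
	for (enum stp s = ST_DISABLED; s <= ST_BLOCKING; s++)
		printf("state %d -> ctrl bits %#x\n", (int)s, stp_ctrl_bits(s));
	return 0;
}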
*/ - if (state != BR_STATE_FORWARDING) { - if (dev->br_member & BIT(port)) - dev->member &= ~BIT(port); - } - /* When topology has changed the function ksz_update_port_member - * should be called to modify port forwarding behavior. - */ - if (forward != dev->member) - ksz_update_port_member(dev, port); + ksz_update_port_member(dev, port); } static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -1341,7 +1306,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port) static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) { - struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; struct ksz8 *ksz8 = dev->priv; const u32 *masks; u8 member; @@ -1368,10 +1333,11 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) if (!ksz_is_ksz88x3(dev)) ksz8795_cpu_interface_select(dev, port); - member = dev->port_mask; + member = dsa_user_ports(ds); } else { - member = dev->host_mask | p->vid_member; + member = BIT(dsa_upstream_port(ds, port)); } + ksz8_cfg_port_member(dev, port, member); } @@ -1392,20 +1358,13 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds) ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true); p = &dev->ports[dev->cpu_port]; - p->vid_member = dev->port_mask; p->on = 1; ksz8_port_setup(dev, dev->cpu_port, true); - dev->member = dev->host_mask; for (i = 0; i < dev->phy_port_cnt; i++) { p = &dev->ports[i]; - /* Initialize to non-zero so that ksz_cfg_port_member() will - * be called. - */ - p->vid_member = BIT(i); - p->member = dev->port_mask; ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED); /* Last port may be disabled. */ diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 854e25f43fa7..353b5f981740 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -391,7 +391,6 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member) { ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member); - dev->ports[port].member = member; } static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, @@ -400,8 +399,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, struct ksz_device *dev = ds->priv; struct ksz_port *p = &dev->ports[port]; u8 data; - int member = -1; - int forward = dev->member; ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); @@ -409,40 +406,18 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, switch (state) { case BR_STATE_DISABLED: data |= PORT_LEARN_DISABLE; - if (port != dev->cpu_port) - member = 0; break; case BR_STATE_LISTENING: data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - if (port != dev->cpu_port && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; case BR_STATE_LEARNING: data |= PORT_RX_ENABLE; break; case BR_STATE_FORWARDING: data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - - /* This function is also used internally. */ - if (port == dev->cpu_port) - break; - - member = dev->host_mask | p->vid_member; - mutex_lock(&dev->dev_mutex); - - /* Port is a member of a bridge. 
*/ - if (dev->br_member & (1 << port)) { - dev->member |= (1 << port); - member = dev->member; - } - mutex_unlock(&dev->dev_mutex); break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; - if (port != dev->cpu_port && - p->stp_state == BR_STATE_DISABLED) - member = dev->host_mask | p->vid_member; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -451,23 +426,8 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, ksz_pwrite8(dev, port, P_STP_CTRL, data); p->stp_state = state; - mutex_lock(&dev->dev_mutex); - /* Port membership may share register with STP state. */ - if (member >= 0 && member != p->member) - ksz9477_cfg_port_member(dev, port, (u8)member); - - /* Check if forwarding needs to be updated. */ - if (state != BR_STATE_FORWARDING) { - if (dev->br_member & (1 << port)) - dev->member &= ~(1 << port); - } - /* When topology has changed the function ksz_update_port_member - * should be called to modify port forwarding behavior. - */ - if (forward != dev->member) - ksz_update_port_member(dev, port); - mutex_unlock(&dev->dev_mutex); + ksz_update_port_member(dev, port); } static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -1168,10 +1128,10 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port) static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) { - u8 data8; - u8 member; - u16 data16; struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; + u8 data8, member; + u16 data16; /* enable tag tail for host port */ if (cpu_port) @@ -1250,12 +1210,12 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8); p->phydev.duplex = 1; } - mutex_lock(&dev->dev_mutex); + if (cpu_port) - member = dev->port_mask; + member = dsa_user_ports(ds); else - member = dev->host_mask | p->vid_member; - mutex_unlock(&dev->dev_mutex); + member = BIT(dsa_upstream_port(ds, port)); + ksz9477_cfg_port_member(dev, port, member); /* clear pending interrupts */ @@ -1276,8 +1236,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) const char *prev_mode; dev->cpu_port = i; - dev->host_mask = (1 << dev->cpu_port); - dev->port_mask |= dev->host_mask; p = &dev->ports[i]; /* Read from XMII register to determine host port @@ -1312,23 +1270,15 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) /* enable cpu port */ ksz9477_port_setup(dev, i, true); - p->vid_member = dev->port_mask; p->on = 1; } } - dev->member = dev->host_mask; - for (i = 0; i < dev->port_cnt; i++) { if (i == dev->cpu_port) continue; p = &dev->ports[i]; - /* Initialize to non-zero so that ksz_cfg_port_member() will - * be called. 
- */ - p->vid_member = (1 << i); - p->member = dev->port_mask; ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED); p->on = 1; if (i < dev->phy_port_cnt) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 7c2968a639eb..55dbda04ea62 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -22,21 +22,40 @@ void ksz_update_port_member(struct ksz_device *dev, int port) { - struct ksz_port *p; + struct ksz_port *p = &dev->ports[port]; + struct dsa_switch *ds = dev->ds; + u8 port_member = 0, cpu_port; + const struct dsa_port *dp; int i; - for (i = 0; i < dev->port_cnt; i++) { - if (i == port || i == dev->cpu_port) + if (!dsa_is_user_port(ds, port)) + return; + + dp = dsa_to_port(ds, port); + cpu_port = BIT(dsa_upstream_port(ds, port)); + + for (i = 0; i < ds->num_ports; i++) { + const struct dsa_port *other_dp = dsa_to_port(ds, i); + struct ksz_port *other_p = &dev->ports[i]; + u8 val = 0; + + if (!dsa_is_user_port(ds, i)) continue; - p = &dev->ports[i]; - if (!(dev->member & (1 << i))) + if (port == i) + continue; + if (!dsa_port_bridge_same(dp, other_dp)) continue; - /* Port is a member of the bridge and is forwarding. */ - if (p->stp_state == BR_STATE_FORWARDING && - p->member != dev->member) - dev->dev_ops->cfg_port_member(dev, i, dev->member); + if (other_p->stp_state == BR_STATE_FORWARDING && + p->stp_state == BR_STATE_FORWARDING) { + val |= BIT(port); + port_member |= BIT(i); + } + + dev->dev_ops->cfg_port_member(dev, i, val | cpu_port); } + + dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port); } EXPORT_SYMBOL_GPL(ksz_update_port_member); @@ -173,14 +192,9 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf) EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats); int ksz_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { - struct ksz_device *dev = ds->priv; - - mutex_lock(&dev->dev_mutex); - dev->br_member |= (1 << port); - mutex_unlock(&dev->dev_mutex); - /* port_stp_state_set() will be called after to put the port in * appropriate state so there is no need to do anything. */ @@ -190,15 +204,8 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port, EXPORT_SYMBOL_GPL(ksz_port_bridge_join); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { - struct ksz_device *dev = ds->priv; - - mutex_lock(&dev->dev_mutex); - dev->br_member &= ~(1 << port); - dev->member &= ~(1 << port); - mutex_unlock(&dev->dev_mutex); - /* port_stp_state_set() will be called after to put the port in * forwarding state so there is no need to do anything. 
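The reworked ksz_update_port_member above is the heart of this change: instead of the removed member/br_member caches, two user ports may forward to each other only when they share a bridge and both are in the forwarding state, while the upstream port is always reachable. A standalone model of that computation (types invented; bridge < 0 marks a standalone port):

#include <stdio.h>

enum { BR_FORWARDING = 3 };

struct port_state {
	int bridge;    /* bridge id, or -1 for a standalone port */
	int stp_state;
};

/* Mask for 'port': its upstream/CPU port always, plus every other user
 * port in the same bridge when both ends are forwarding. */
static unsigned int member_mask(const struct port_state *p, int nports,
				int port, int cpu_port)
{
	unsigned int mask = 1u << cpu_port;

	for (int i = 0; i < nports; i++) {
		if (i == port || i == cpu_port || p[i].bridge < 0)
			continue;
		if (p[i].bridge != p[port].bridge)
			continue;
		if (p[i].stp_state == BR_FORWARDING &&
		    p[port].stp_state == BR_FORWARDING)
			mask |= 1u << i;
	}
	return mask;
}

int main(void)
{
	struct port_state ports[4] = {
		{ 0, BR_FORWARDING }, { 0, BR_FORWARDING },
		{ -1, 0 }, { 0, 0 /* not forwarding */ },
	};

	for (int i = 0; i < 4; i++)
		printf("port %d -> mask %#x\n", i, member_mask(ports, 4, i, 4));
	return 0;
}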
*/ @@ -295,7 +302,6 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port, struct ksz_device *dev = ds->priv; struct alu_struct alu; int index; - int ret = 0; for (index = 0; index < dev->num_statics; index++) { if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) { @@ -317,7 +323,7 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port, dev->dev_ops->w_sta_mac_table(dev, index, &alu); exit: - return ret; + return 0; } EXPORT_SYMBOL_GPL(ksz_port_mdb_del); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 1597c63988b4..df8ae59c8525 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -25,8 +25,6 @@ struct ksz_port_mib { }; struct ksz_port { - u16 member; - u16 vid_member; bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ int stp_state; struct phy_device phydev; @@ -83,8 +81,6 @@ struct ksz_device { struct ksz_port *ports; struct delayed_work mib_read; unsigned long mib_read_interval; - u16 br_member; - u16 member; u16 mirror_rx; u16 mirror_tx; u32 features; /* chip specific features */ @@ -159,9 +155,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, int ksz_sset_count(struct dsa_switch *ds, int port, int sset); void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf); int ksz_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br); + struct dsa_bridge bridge, bool *tx_fwd_offload); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br); + struct dsa_bridge bridge); void ksz_port_fast_age(struct dsa_switch *ds, int port); int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 9890672a206d..b82512e5b33b 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1186,29 +1186,33 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port, static int mt7530_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, bool *tx_fwd_offload) { - struct mt7530_priv *priv = ds->priv; + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; u32 port_bitmap = BIT(MT7530_CPU_PORT); - int i; + struct mt7530_priv *priv = ds->priv; mutex_lock(&priv->reg_mutex); - for (i = 0; i < MT7530_NUM_PORTS; i++) { + dsa_switch_for_each_user_port(other_dp, ds) { + int other_port = other_dp->index; + + if (dp == other_dp) + continue; + /* Add this port to the port matrix of the other ports in the * same bridge. If the port is disabled, port matrix is kept * and not being setup until the port becomes enabled. */ - if (dsa_is_user_port(ds, i) && i != port) { - if (dsa_to_port(ds, i)->bridge_dev != bridge) - continue; - if (priv->ports[i].enable) - mt7530_set(priv, MT7530_PCR_P(i), - PCR_MATRIX(BIT(port))); - priv->ports[i].pm |= PCR_MATRIX(BIT(port)); + if (!dsa_port_offloads_bridge(other_dp, &bridge)) + continue; - port_bitmap |= BIT(i); - } + if (priv->ports[other_port].enable) + mt7530_set(priv, MT7530_PCR_P(other_port), + PCR_MATRIX(BIT(port))); + priv->ports[other_port].pm |= PCR_MATRIX(BIT(port)); + + port_bitmap |= BIT(other_port); } /* Add the all other ports to this port matrix. */ @@ -1236,7 +1240,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) /* This is called after .port_bridge_leave when leaving a VLAN-aware * bridge. Don't set standalone ports to fallback mode. 
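mt7530's join path above does the same job with explicit port matrices: OR this port into every bridge peer's matrix, and accumulate the peers plus the CPU port into this port's own entry. A minimal model of that bookkeeping, with pm[] standing in for the cached PCR_MATRIX values (names invented for the sketch):

#include <stdio.h>

#define CPU_PORT 6

/* same_bridge[i] marks user ports that share a bridge with 'port'. */
static void bridge_join_matrix(unsigned int *pm, int nports, int port,
			       const int *same_bridge)
{
	unsigned int port_bitmap = 1u << CPU_PORT;

	for (int i = 0; i < nports; i++) {
		if (i == port || !same_bridge[i])
			continue;
		pm[i] |= 1u << port;     /* peer may now forward to us */
		port_bitmap |= 1u << i;  /* and we may forward to the peer */
	}
	pm[port] = port_bitmap;
}

int main(void)
{
	unsigned int pm[5] = { 0 };
	int same_bridge[5] = { 1, 0, 1, 0, 0 };

	bridge_join_matrix(pm, 5, 2, same_bridge);
	for (int i = 0; i < 5; i++)
		printf("pm[%d] = %#x\n", i, pm[i]);
	return 0;
}

The leave path in the following hunk is the mirror image: clear BIT(port) from each peer's matrix and drop the peer bits from pm[port].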
*/ - if (dsa_to_port(ds, port)->bridge_dev) + if (dsa_port_bridge_dev_get(dsa_to_port(ds, port))) mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, MT7530_PORT_FALLBACK_MODE); @@ -1299,26 +1303,30 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port) static void mt7530_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; struct mt7530_priv *priv = ds->priv; - int i; mutex_lock(&priv->reg_mutex); - for (i = 0; i < MT7530_NUM_PORTS; i++) { + dsa_switch_for_each_user_port(other_dp, ds) { + int other_port = other_dp->index; + + if (dp == other_dp) + continue; + /* Remove this port from the port matrix of the other ports * in the same bridge. If the port is disabled, port matrix * is kept and not being setup until the port becomes enabled. */ - if (dsa_is_user_port(ds, i) && i != port) { - if (dsa_to_port(ds, i)->bridge_dev != bridge) - continue; - if (priv->ports[i].enable) - mt7530_clear(priv, MT7530_PCR_P(i), - PCR_MATRIX(BIT(port))); - priv->ports[i].pm &= ~PCR_MATRIX(BIT(port)); - } + if (!dsa_port_offloads_bridge(other_dp, &bridge)) + continue; + + if (priv->ports[other_port].enable) + mt7530_clear(priv, MT7530_PCR_P(other_port), + PCR_MATRIX(BIT(port))); + priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port)); } /* Set the cpu port to be the only one in the port matrix of diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f00cbf5753b9..58ca684d73f7 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port) u16 reg; int err; + /* The 88e6250 family does not have the PHY detect bit. Instead, + * report whether the port is internal. + */ + if (chip->info->family == MV88E6XXX_FAMILY_6250) + return port < chip->info->num_internal_phys; + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); if (err) { dev_err(chip->dev, @@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, { struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_port *p; - int err; + int err = 0; p = &chip->ports[port]; - /* FIXME: is this the correct test? If we're in fixed mode on an - * internal port, why should we process this any different from - * PHY mode? On the other hand, the port may be automedia between - * an internal PHY and the serdes... - */ - if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port)) - return; - mv88e6xxx_reg_lock(chip); - /* In inband mode, the link may come up at any time while the link - * is not forced down. Force the link down while we reconfigure the - * interface mode. - */ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) - chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); - - err = mv88e6xxx_port_config_interface(chip, port, state->interface); - if (err && err != -EOPNOTSUPP) - goto err_unlock; - err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface, - state->advertising); - /* FIXME: we should restart negotiation if something changed - which - * is something we get if we convert to using phylinks PCS operations. - */ - if (err > 0) - err = 0; + if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) { + /* In inband mode, the link may come up at any time while the + * link is not forced down. Force the link down while we + * reconfigure the interface mode. 
+ */ + if (mode == MLO_AN_INBAND && + p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, + LINK_FORCED_DOWN); + + err = mv88e6xxx_port_config_interface(chip, port, + state->interface); + if (err && err != -EOPNOTSUPP) + goto err_unlock; + + err = mv88e6xxx_serdes_pcs_config(chip, port, mode, + state->interface, + state->advertising); + /* FIXME: we should restart negotiation if something changed - + * which is something we get if we convert to using phylinks + * PCS operations. + */ + if (err > 0) + err = 0; + } /* Undo the forced down state above after completing configuration - * irrespective of its state on entry, which allows the link to come up. + * irrespective of its state on entry, which allows the link to come + * up in the in-band case where there is no separate SERDES. Also + * ensure that the link can come up if the PPU is in use and we are + * in PHY mode (we treat the PPU as an effective in-band mechanism.) */ - if (mode == MLO_AN_INBAND && p->interface != state->interface && - chip->info->ops->port_set_link) + if (chip->info->ops->port_set_link && + ((mode == MLO_AN_INBAND && p->interface != state->interface) || + (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port)))) chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); p->interface = state->interface; @@ -752,13 +762,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, ops = chip->info->ops; mv88e6xxx_reg_lock(chip); - /* Internal PHYs propagate their configuration directly to the MAC. - * External PHYs depend on whether the PPU is enabled for this port. + /* Force the link down if we know the port may not be automatically + * updated by the switch or if we are using fixed-link mode. */ - if (((!mv88e6xxx_phy_is_internal(ds, port) && - !mv88e6xxx_port_ppu_updates(chip, port)) || + if ((!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) && ops->port_sync_link) err = ops->port_sync_link(chip, port, mode, false); + + if (!err && ops->port_set_speed_duplex) + err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED, + DUPLEX_UNFORCED); mv88e6xxx_reg_unlock(chip); if (err) @@ -779,11 +792,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, ops = chip->info->ops; mv88e6xxx_reg_lock(chip); - /* Internal PHYs propagate their configuration directly to the MAC. - * External PHYs depend on whether the PPU is enabled for this port. + /* Configure and force the link up if we know that the port may not + * be automatically updated by the switch or if we are using fixed-link + * mode. */ - if ((!mv88e6xxx_phy_is_internal(ds, port) && - !mv88e6xxx_port_ppu_updates(chip, port)) || + if (!mv88e6xxx_port_ppu_updates(chip, port) || + mode == MLO_AN_FIXED) { /* FIXME: for an automedia port, should we force the link * down here - what if the link comes up due to "other" media @@ -1228,8 +1241,7 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) { struct dsa_switch *ds = chip->ds; struct dsa_switch_tree *dst = ds->dst; - struct net_device *br; - struct dsa_port *dp; + struct dsa_port *dp, *other_dp; bool found = false; u16 pvlan; @@ -1238,11 +1250,9 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) list_for_each_entry(dp, &dst->ports, list) { if (dp->ds->index == dev && dp->index == port) { /* dp might be a DSA link or a user port, so it - * might or might not have a bridge_dev - * pointer. Use the "found" variable for both - * cases. 
+ * might or might not have a bridge. + * Use the "found" variable for both cases. */ - br = dp->bridge_dev; found = true; break; } @@ -1250,13 +1260,14 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) /* dev is a virtual bridge */ } else { list_for_each_entry(dp, &dst->ports, list) { - if (dp->bridge_num < 0) + unsigned int bridge_num = dsa_port_bridge_num_get(dp); + + if (!bridge_num) continue; - if (dp->bridge_num + 1 + dst->last_switch != dev) + if (bridge_num + dst->last_switch != dev) continue; - br = dp->bridge_dev; found = true; break; } @@ -1275,12 +1286,11 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) /* Frames from user ports can egress any local DSA links and CPU ports, * as well as any local member of their bridge group. */ - list_for_each_entry(dp, &dst->ports, list) - if (dp->ds == ds && - (dp->type == DSA_PORT_TYPE_CPU || - dp->type == DSA_PORT_TYPE_DSA || - (br && dp->bridge_dev == br))) - pvlan |= BIT(dp->index); + dsa_switch_for_each_port(other_dp, ds) + if (other_dp->type == DSA_PORT_TYPE_CPU || + other_dp->type == DSA_PORT_TYPE_DSA || + dsa_port_bridge_same(dp, other_dp)) + pvlan |= BIT(other_dp->index); return pvlan; } @@ -1647,12 +1657,13 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid) { + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; struct mv88e6xxx_chip *chip = ds->priv; struct mv88e6xxx_vtu_entry vlan; - int i, err; + int err; /* DSA and CPU ports have to be members of multiple vlans */ - if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) return 0; err = mv88e6xxx_vtu_get(chip, vid, &vlan); @@ -1662,27 +1673,22 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (!vlan.valid) return 0; - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) { - if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) - continue; + dsa_switch_for_each_user_port(other_dp, ds) { + struct net_device *other_br; - if (!dsa_to_port(ds, i)->slave) - continue; - - if (vlan.member[i] == + if (vlan.member[other_dp->index] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) continue; - if (dsa_to_port(ds, i)->bridge_dev == - dsa_to_port(ds, port)->bridge_dev) + if (dsa_port_bridge_same(dp, other_dp)) break; /* same bridge, check next VLAN */ - if (!dsa_to_port(ds, i)->bridge_dev) + other_br = dsa_port_bridge_dev_get(other_dp); + if (!other_br) continue; dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n", - port, vlan.vid, i, - netdev_name(dsa_to_port(ds, i)->bridge_dev)); + port, vlan.vid, other_dp->index, netdev_name(other_br)); return -EOPNOTSUPP; } @@ -1692,13 +1698,14 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port) { struct dsa_port *dp = dsa_to_port(chip->ds, port); + struct net_device *br = dsa_port_bridge_dev_get(dp); struct mv88e6xxx_port *p = &chip->ports[port]; u16 pvid = MV88E6XXX_VID_STANDALONE; bool drop_untagged = false; int err; - if (dp->bridge_dev) { - if (br_vlan_enabled(dp->bridge_dev)) { + if (br) { + if (br_vlan_enabled(br)) { pvid = p->bridge_pvid.vid; drop_untagged = !p->bridge_pvid.valid; } else { @@ -2416,7 +2423,7 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, } static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, - struct net_device *br) + struct dsa_bridge 
bridge) { struct dsa_switch *ds = chip->ds; struct dsa_switch_tree *dst = ds->dst; @@ -2424,7 +2431,7 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, int err; list_for_each_entry(dp, &dst->ports, list) { - if (dp->bridge_dev == br) { + if (dsa_port_offloads_bridge(dp, &bridge)) { if (dp->ds == ds) { /* This is a local bridge group member, * remap its Port VLAN Map. @@ -2447,15 +2454,29 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, return 0; } +/* Treat the software bridge as a virtual single-port switch behind the + * CPU and map in the PVT. First dst->last_switch elements are taken by + * physical switches, so start from beyond that range. + */ +static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds, + unsigned int bridge_num) +{ + u8 dev = bridge_num + ds->dst->last_switch; + struct mv88e6xxx_chip *chip = ds->priv; + + return mv88e6xxx_pvt_map(chip, dev, 0); +} + static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct mv88e6xxx_chip *chip = ds->priv; int err; mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_bridge_map(chip, br); + err = mv88e6xxx_bridge_map(chip, bridge); if (err) goto unlock; @@ -2463,6 +2484,14 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, if (err) goto unlock; + if (mv88e6xxx_has_pvt(chip)) { + err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num); + if (err) + goto unlock; + + *tx_fwd_offload = true; + } + unlock: mv88e6xxx_reg_unlock(chip); @@ -2470,14 +2499,18 @@ unlock: } static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct mv88e6xxx_chip *chip = ds->priv; int err; mv88e6xxx_reg_lock(chip); - if (mv88e6xxx_bridge_map(chip, br) || + if (bridge.tx_fwd_offload && + mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num)) + dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n"); + + if (mv88e6xxx_bridge_map(chip, bridge) || mv88e6xxx_port_vlan_map(chip, port)) dev_err(ds->dev, "failed to remap in-chip Port VLAN\n"); @@ -2492,7 +2525,7 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int tree_index, int sw_index, - int port, struct net_device *br) + int port, struct dsa_bridge bridge) { struct mv88e6xxx_chip *chip = ds->priv; int err; @@ -2502,6 +2535,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_pvt_map(chip, sw_index, port); + err = err ? : mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num); mv88e6xxx_reg_unlock(chip); return err; @@ -2509,7 +2543,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int tree_index, int sw_index, - int port, struct net_device *br) + int port, struct dsa_bridge bridge) { struct mv88e6xxx_chip *chip = ds->priv; @@ -2517,49 +2551,12 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, return; mv88e6xxx_reg_lock(chip); - if (mv88e6xxx_pvt_map(chip, sw_index, port)) + if (mv88e6xxx_pvt_map(chip, sw_index, port) || + mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num)) dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n"); mv88e6xxx_reg_unlock(chip); } -/* Treat the software bridge as a virtual single-port switch behind the - * CPU and map in the PVT. 
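Note the numbering change in the relocated helper above: bridge numbers are now 1-based with 0 meaning "not bridged", so the old "+ 1" disappears and a software bridge's PVT device ID is simply its number offset past the physical switches. In sketch form (standalone C, names invented):

#include <assert.h>
#include <stdio.h>

/* Device IDs 0..last_switch belong to physical switches; software
 * bridges (numbered from 1, 0 = none) take the IDs after that range. */
static unsigned int bridge_pvt_dev(unsigned int bridge_num,
				   unsigned int last_switch)
{
	assert(bridge_num != 0); /* 0 means "not bridged" in the new scheme */
	return bridge_num + last_switch;
}

int main(void)
{
	/* e.g. two switches in the tree (last_switch == 1): bridge 1
	 * lands on PVT device 2, bridge 2 on device 3, and so on. */
	printf("bridge 1 -> dev %u\n", bridge_pvt_dev(1, 1));
	printf("bridge 2 -> dev %u\n", bridge_pvt_dev(2, 1));
	return 0;
}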
First dst->last_switch elements are taken by - * physical switches, so start from beyond that range. - */ -static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds, - int bridge_num) -{ - u8 dev = bridge_num + ds->dst->last_switch + 1; - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_pvt_map(chip, dev, 0); - mv88e6xxx_reg_unlock(chip); - - return err; -} - -static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) -{ - return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num); -} - -static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct net_device *br, - int bridge_num) -{ - int err; - - err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num); - if (err) { - dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n", - ERR_PTR(err)); - } -} - static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip) { if (chip->info->ops->reset) @@ -3186,8 +3183,8 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) * time. */ if (mv88e6xxx_has_pvt(chip)) - ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES - - ds->dst->last_switch - 1; + ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES - + ds->dst->last_switch - 1; mv88e6xxx_reg_lock(chip); @@ -6279,8 +6276,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .crosschip_lag_change = mv88e6xxx_crosschip_lag_change, .crosschip_lag_join = mv88e6xxx_crosschip_lag_join, .crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave, - .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload, - .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload, }; static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 8f74ffc7a279..389f8a6ec0ab 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -100,10 +100,6 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, */ clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state); - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tstamp_enable = false; diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index d9817b20ea64..ab41619a809b 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -283,7 +283,7 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip, if (err) return err; - if (speed) + if (speed != SPEED_UNFORCED) dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed); else dev_dbg(chip->dev, "p%d: Speed unforced\n", port); @@ -516,7 +516,7 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, if (err) return err; - if (speed) + if (speed != SPEED_UNFORCED) dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed); else dev_dbg(chip->dev, "p%d: Speed unforced\n", port); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 6ea003678798..2b05ead515cd 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 status, u16 lpa, + u16 ctrl, u16 status, u16 lpa, struct phylink_link_state *state) { + state->link = !!(status & 
MV88E6390_SGMII_PHY_STATUS_LINK); + if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { - state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + /* The Speed and Duplex Resolved register is 1 if AN is enabled + * and complete, or if AN is disabled. So with disabled AN we + * still get here on link up. But we want to set an_complete + * only if AN was enabled, thus we look at BMCR_ANENABLE. + * (According to 802.3-2008 section 22.2.4.2.10, we should be + * able to get this same value from BMSR_ANEGCAPABLE, but tests + * show that these Marvell PHYs don't conform to this part of + * the specification - BMSR_ANEGCAPABLE is simply always 1.) + */ + state->an_complete = !!(ctrl & BMCR_ANENABLE); state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ? DUPLEX_FULL : DUPLEX_HALF; @@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, dev_err(chip->dev, "invalid PHY speed\n"); return -EINVAL; } + } else if (state->link && + state->interface != PHY_INTERFACE_MODE_SGMII) { + /* If Speed and Duplex Resolved register is 0 and link is up, it + * means that AN was enabled, but link partner had it disabled + * and the PHY invoked the Auto-Negotiation Bypass feature and + * linked anyway. + */ + state->duplex = DUPLEX_FULL; + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + else + state->speed = SPEED_1000; } else { state->link = false; } @@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status; + u16 lpa, status, ctrl; int err; + err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl); + if (err) { + dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + return err; + } + err = mv88e6352_serdes_read(chip, 0x11, &status); if (err) { dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); @@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, @@ -801,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool up) { u8 cmode = chip->ports[port].cmode; - int err = 0; + int err; switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: @@ -813,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, case MV88E6XXX_PORT_STS_CMODE_RXAUI: err = mv88e6390_serdes_power_10g(chip, lane, up); break; + default: + err = -EINVAL; + break; } if (!err && up) @@ -883,10 +915,17 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status; + u16 lpa, status, ctrl; int err; err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_BMCR, &ctrl); + if (err) { + dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + return err; + } + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, MV88E6390_SGMII_PHY_STATUS, &status); if (err) { dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err); @@ -900,7 +939,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, return err; } - return 
mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, @@ -1271,9 +1310,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) } } -static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) +static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane, + bool on) { - u16 reg, pcs; + u16 reg; + int err; + + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_CTRL1, &reg); + if (err) + return err; + + if (on) + reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN | + MV88E6393X_SERDES_CTRL1_RX_PDOWN); + else + reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN | + MV88E6393X_SERDES_CTRL1_RX_PDOWN; + + return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_CTRL1, reg); +} + +static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg; int err; /* mv88e6393x family errata 4.6: @@ -1284,26 +1345,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) * It seems that after this workaround the SERDES is automatically * powered up (the bit is cleared), so power it down. */ - if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE || - lane == MV88E6393X_PORT10_LANE) { - err = mv88e6390_serdes_read(chip, lane, - MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, &reg); - if (err) - return err; + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &reg); + if (err) + return err; - reg &= ~MV88E6393X_SERDES_POC_PDOWN; - reg |= MV88E6393X_SERDES_POC_RESET; + reg &= ~MV88E6393X_SERDES_POC_PDOWN; + reg |= MV88E6393X_SERDES_POC_RESET; - err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, reg); - if (err) - return err; + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, - MV88E6393X_SERDES_POC, reg); + if (err) + return err; - err = mv88e6390_serdes_power_sgmii(chip, lane, false); - if (err) - return err; - } + err = mv88e6390_serdes_power_sgmii(chip, lane, false); + if (err) + return err; + + return mv88e6393x_serdes_power_lane(chip, lane, false); +} + +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE); + if (err) + return err; + + err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE); + if (err) + return err; + + return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE); +} + +static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg, pcs; + int err; /* mv88e6393x family errata 4.8: * When a SERDES port is operating in 1000BASE-X or SGMII mode link may @@ -1334,38 +1414,152 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) MV88E6393X_ERRATA_4_8_REG, reg); } -int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane, + u8 cmode) +{ + static const struct { + u16 dev, reg, val, mask; + } fixes[] = { + { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff }, + { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff }, + { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff }, + { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f }, + { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 }, + { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff }, + { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC, + MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET }, + }; 
+ int err, i; + u16 reg; + + /* mv88e6393x family errata 5.2: + * For optimal signal integrity the following sequence should be applied + * to SERDES operating in 10G mode. These registers only apply to 10G + * operation and have no effect on other speeds. + */ + if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER) + return 0; + + for (i = 0; i < ARRAY_SIZE(fixes); ++i) { + err = mv88e6390_serdes_read(chip, lane, fixes[i].dev, + fixes[i].reg, &reg); + if (err) + return err; + + reg &= ~fixes[i].mask; + reg |= fixes[i].val; + + err = mv88e6390_serdes_write(chip, lane, fixes[i].dev, + fixes[i].reg, reg); + if (err) + return err; + } + + return 0; +} + +static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip, + int lane, u8 cmode, bool on) { + u16 reg; int err; - err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE); + if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX) + return 0; + + /* Inband AN is broken on Amethyst in 2500base-x mode when set by + * the standard mechanism (via cmode). + * We can get around this by configuring the PCS mode to 1000base-x + * and then writing value 0x58 to register 1e.8000. (This must be done + * while the SerDes receiver and transmitter are disabled, which is + * the case when this function is called.) + * It seems that when we do this configuration to 2500base-x mode (by + * changing PCS mode to 1000base-x and frequency to 3.125 GHz from + * 1.25 GHz) and then configure to sgmii or 1000base-x, the device + * thinks that it already has SerDes at 1.25 GHz and does not change + * the 1e.8000 register, leaving SerDes at 3.125 GHz. + * To avoid this, change PCS mode back to 2500base-x when disabling + * SerDes from 2500base-x mode. + */ + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, &reg); + if (err) + return err; + + reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN); + if (on) + reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX | + MV88E6393X_SERDES_POC_AN; + else + reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX; + reg |= MV88E6393X_SERDES_POC_RESET; + + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS, + MV88E6393X_SERDES_POC, reg); if (err) return err; - err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE); + err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58); if (err) return err; - return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE); + return 0; } int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, bool on) { u8 cmode = chip->ports[port].cmode; + int err; if (port != 0 && port != 9 && port != 10) return -EOPNOTSUPP; + if (on) { + err = mv88e6393x_serdes_erratum_4_8(chip, lane); + if (err) + return err; + + err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode); + if (err) + return err; + + err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, + true); + if (err) + return err; + + err = mv88e6393x_serdes_power_lane(chip, lane, true); + if (err) + return err; + } + switch (cmode) { case MV88E6XXX_PORT_STS_CMODE_SGMII: case MV88E6XXX_PORT_STS_CMODE_1000BASEX: case MV88E6XXX_PORT_STS_CMODE_2500BASEX: - return mv88e6390_serdes_power_sgmii(chip, lane, on); + err = mv88e6390_serdes_power_sgmii(chip, lane, on); + break; case MV88E6393X_PORT_STS_CMODE_5GBASER: case MV88E6393X_PORT_STS_CMODE_10GBASER: - return mv88e6390_serdes_power_10g(chip, lane, on); + err = mv88e6390_serdes_power_10g(chip, lane, on); + break; + default: + err = -EINVAL; + break; } - return 0; + if (err) + return err; + + if (!on) { + err = 
mv88e6393x_serdes_power_lane(chip, lane, false); + if (err) + return err; + + err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode, + false); + } + + return err; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index cbb3ba30caea..8dd8ed225b45 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -93,6 +93,10 @@ #define MV88E6393X_SERDES_POC_PCS_MASK 0x0007 #define MV88E6393X_SERDES_POC_RESET BIT(15) #define MV88E6393X_SERDES_POC_PDOWN BIT(5) +#define MV88E6393X_SERDES_POC_AN BIT(3) +#define MV88E6393X_SERDES_CTRL1 0xf003 +#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9) +#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8) #define MV88E6393X_ERRATA_4_8_REG 0xF074 #define MV88E6393X_ERRATA_4_8_BIT BIT(14) diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index 9948544ba1c4..220b0b027b55 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -21,6 +21,7 @@ config NET_DSA_MSCC_SEVILLE depends on NET_VENDOR_MICROSEMI depends on HAS_IOMEM depends on PTP_1588_CLOCK_OPTIONAL + select MDIO_MSCC_MIIM select MSCC_OCELOT_SWITCH_LIB select NET_DSA_TAG_OCELOT_8021Q select NET_DSA_TAG_OCELOT diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 327cc4654806..9957772201d5 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -21,7 +21,6 @@ #include <linux/of_net.h> #include <linux/pci.h> #include <linux/of.h> -#include <linux/pcs-lynx.h> #include <net/pkt_sched.h> #include <net/dsa.h> #include "felix.h" @@ -240,24 +239,32 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) */ static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) { + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ports[port]->is_dsa_8021q_cpu = true; ocelot->npi = -1; /* Overwrite PGID_CPU with the non-tagging port */ ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU); - ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); } static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) { + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ports[port]->is_dsa_8021q_cpu = false; /* Restore PGID_CPU */ ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID, PGID_CPU); - ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); } /* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module. 
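felix_8021q_cpu_port_init/deinit above now flip is_dsa_8021q_cpu and recompute the bridge forwarding mask under the new ocelot->fwd_domain_lock, so concurrent updaters cannot interleave. A standalone pthread model of that pattern (all names invented; the mask rewrite is only a stand-in for ocelot_apply_bridge_fwd_mask):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct model_switch {
	pthread_mutex_t fwd_domain_lock;
	bool is_8021q_cpu[8];
	unsigned int fwd_mask[8];
};

/* Flip the CPU-port flag and recompute masks atomically, so a concurrent
 * bridge join/leave can't observe a half-updated forwarding domain. */
static void set_8021q_cpu_port(struct model_switch *sw, int port, bool on)
{
	pthread_mutex_lock(&sw->fwd_domain_lock);
	sw->is_8021q_cpu[port] = on;
	for (int i = 0; i < 8; i++) /* stand-in for the real mask rewrite */
		sw->fwd_mask[i] = on ? (1u << port) : 0;
	pthread_mutex_unlock(&sw->fwd_domain_lock);
}

int main(void)
{
	struct model_switch sw = {
		.fwd_domain_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	set_8021q_cpu_port(&sw, 4, true);
	printf("port 0 mask %#x\n", sw.fwd_mask[0]);
	return 0;
}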
@@ -290,8 +297,11 @@ static int felix_setup_mmio_filtering(struct felix *felix) } } - if (cpu < 0) + if (cpu < 0) { + kfree(tagging_rule); + kfree(redirect_rule); return -EINVAL; + } tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE; *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588); @@ -629,6 +639,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds, return 0; } +static void felix_port_fast_age(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + int err; + + err = ocelot_mact_flush(ocelot, port); + if (err) + dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n", + port, ERR_PTR(err)); +} + static int felix_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { @@ -698,21 +719,21 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port, } static int felix_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, bool *tx_fwd_offload) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_join(ocelot, port, br); + ocelot_port_bridge_join(ocelot, port, bridge.dev); return 0; } static void felix_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_leave(ocelot, port, br); + ocelot_port_bridge_leave(ocelot, port, bridge.dev); } static int felix_lag_join(struct dsa_switch *ds, int port, @@ -820,8 +841,8 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, struct felix *felix = ocelot_to_felix(ocelot); struct dsa_port *dp = dsa_to_port(ds, port); - if (felix->pcs[port]) - phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs); + if (felix->pcs && felix->pcs[port]) + phylink_set_pcs(dp->pl, felix->pcs[port]); } static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, @@ -989,6 +1010,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) ocelot->num_stats = felix->info->num_stats; ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap = felix->info->vcap; + ocelot->vcap_pol.base = felix->info->vcap_pol_base; + ocelot->vcap_pol.max = felix->info->vcap_pol_max; + ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2; + ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2; ocelot->ops = felix->info->ops; ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT; ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT; @@ -1016,7 +1041,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) res.start += felix->switch_base; res.end += felix->switch_base; - target = ocelot_regmap_init(ocelot, &res); + target = felix->info->init_regmap(ocelot, &res); if (IS_ERR(target)) { dev_err(ocelot->dev, "Failed to map device memory space\n"); @@ -1053,7 +1078,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) res.start += felix->switch_base; res.end += felix->switch_base; - target = ocelot_regmap_init(ocelot, &res); + target = felix->info->init_regmap(ocelot, &res); if (IS_ERR(target)) { dev_err(ocelot->dev, "Failed to map memory space for port %d\n", @@ -1140,38 +1165,22 @@ static void felix_port_deferred_xmit(struct kthread_work *work) kfree(xmit_work); } -static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port) +static int felix_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) { - struct dsa_port *dp = dsa_to_port(ds, port); - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - struct felix_port *felix_port; + struct 
ocelot_8021q_tagger_data *tagger_data; - if (!dsa_port_is_user(dp)) + switch (proto) { + case DSA_TAG_PROTO_OCELOT_8021Q: + tagger_data = ocelot_8021q_tagger_data(ds); + tagger_data->xmit_work_fn = felix_port_deferred_xmit; return 0; - - felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL); - if (!felix_port) - return -ENOMEM; - - felix_port->xmit_worker = felix->xmit_worker; - felix_port->xmit_work_fn = felix_port_deferred_xmit; - - dp->priv = felix_port; - - return 0; -} - -static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port) -{ - struct dsa_port *dp = dsa_to_port(ds, port); - struct felix_port *felix_port = dp->priv; - - if (!felix_port) - return; - - dp->priv = NULL; - kfree(felix_port); + case DSA_TAG_PROTO_OCELOT: + case DSA_TAG_PROTO_SEVILLE: + return 0; + default: + return -EPROTONOSUPPORT; + } } /* Hardware initialization done here so that we can allocate structures with @@ -1202,12 +1211,6 @@ static int felix_setup(struct dsa_switch *ds) } } - felix->xmit_worker = kthread_create_worker(0, "felix_xmit"); - if (IS_ERR(felix->xmit_worker)) { - err = PTR_ERR(felix->xmit_worker); - goto out_deinit_timestamp; - } - for (port = 0; port < ds->num_ports; port++) { if (dsa_is_unused_port(ds, port)) continue; @@ -1218,14 +1221,6 @@ static int felix_setup(struct dsa_switch *ds) * bits of vlan tag. */ felix_port_qos_map_init(ocelot, port); - - err = felix_port_setup_tagger_data(ds, port); - if (err) { - dev_err(ds->dev, - "port %d failed to set up tagger data: %pe\n", - port, ERR_PTR(err)); - goto out_deinit_ports; - } } err = ocelot_devlink_sb_register(ocelot); @@ -1253,13 +1248,9 @@ out_deinit_ports: if (dsa_is_unused_port(ds, port)) continue; - felix_port_teardown_tagger_data(ds, port); ocelot_deinit_port(ocelot, port); } - kthread_destroy_worker(felix->xmit_worker); - -out_deinit_timestamp: ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); @@ -1288,12 +1279,9 @@ static void felix_teardown(struct dsa_switch *ds) if (dsa_is_unused_port(ds, port)) continue; - felix_port_teardown_tagger_data(ds, port); ocelot_deinit_port(ocelot, port); } - kthread_destroy_worker(felix->xmit_worker); - ocelot_devlink_sb_unregister(ocelot); ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); @@ -1633,6 +1621,7 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port, const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .change_tag_protocol = felix_change_tag_protocol, + .connect_tag_protocol = felix_connect_tag_protocol, .setup = felix_setup, .teardown = felix_teardown, .set_ageing_time = felix_set_ageing_time, @@ -1644,6 +1633,7 @@ const struct dsa_switch_ops felix_switch_ops = { .phylink_mac_config = felix_phylink_mac_config, .phylink_mac_link_down = felix_phylink_mac_link_down, .phylink_mac_link_up = felix_phylink_mac_link_up, + .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index be3e42e135c0..9395ac119d33 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -21,8 +21,10 @@ struct felix_info { int num_ports; int num_tx_queues; struct vcap_props *vcap; - int switch_pci_bar; - int imdio_pci_bar; + u16 vcap_pol_base; + u16 vcap_pol_max; + u16 vcap_pol_base2; + u16 vcap_pol_max2; const struct ptp_clock_info *ptp_caps; /* Some Ocelot switches are integrated into the SoC without the @@ -48,6 +50,8 @@ struct felix_info { enum tc_setup_type type, 
void *type_data); void (*port_sched_speed_set)(struct ocelot *ocelot, int port, u32 speed); + struct regmap *(*init_regmap)(struct ocelot *ocelot, + struct resource *res); }; extern const struct dsa_switch_ops felix_switch_ops; @@ -58,7 +62,7 @@ struct felix { const struct felix_info *info; struct ocelot ocelot; struct mii_bus *imdio; - struct lynx_pcs **pcs; + struct phylink_pcs **pcs; resource_size_t switch_base; resource_size_t imdio_base; enum dsa_tag_protocol tag_proto; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 45c5ec7a83ea..bf8d38239e7e 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -5,8 +5,10 @@ #include <linux/fsl/enetc_mdio.h> #include <soc/mscc/ocelot_qsys.h> #include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/ocelot_ana.h> #include <soc/mscc/ocelot_ptp.h> #include <soc/mscc/ocelot_sys.h> +#include <net/tc_act/tc_gate.h> #include <soc/mscc/ocelot.h> #include <linux/dsa/ocelot.h> #include <linux/pcs-lynx.h> @@ -17,6 +19,10 @@ #include "felix.h" #define VSC9959_TAS_GCL_ENTRY_MAX 63 +#define VSC9959_VCAP_POLICER_BASE 63 +#define VSC9959_VCAP_POLICER_MAX 383 +#define VSC9959_SWITCH_PCI_BAR 4 +#define VSC9959_IMDIO_PCI_BAR 0 static const u32 vsc9959_ana_regmap[] = { REG(ANA_ADVLEARN, 0x0089a0), @@ -292,7 +298,7 @@ static const u32 vsc9959_sys_regmap[] = { REG_RESERVED(SYS_MMGT_FAST), REG_RESERVED(SYS_EVENTS_DIF), REG_RESERVED(SYS_EVENTS_CORE), - REG_RESERVED(SYS_CNT), + REG(SYS_CNT, 0x000000), REG(SYS_PTP_STATUS, 0x000f14), REG(SYS_PTP_TXSTAMP, 0x000f18), REG(SYS_PTP_NXT, 0x000f1c), @@ -1020,15 +1026,6 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse) *maxuse = val & GENMASK(11, 0); } -static const struct ocelot_ops vsc9959_ops = { - .reset = vsc9959_reset, - .wm_enc = vsc9959_wm_enc, - .wm_dec = vsc9959_wm_dec, - .wm_stat = vsc9959_wm_stat, - .port_to_netdev = felix_port_to_netdev, - .netdev_to_port = felix_netdev_to_port, -}; - static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) { struct felix *felix = ocelot_to_felix(ocelot); @@ -1042,7 +1039,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) int rc; felix->pcs = devm_kcalloc(dev, felix->info->num_ports, - sizeof(struct lynx_pcs *), + sizeof(struct phylink_pcs *), GFP_KERNEL); if (!felix->pcs) { dev_err(dev, "failed to allocate array for PCS PHYs\n"); @@ -1091,8 +1088,8 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) for (port = 0; port < felix->info->num_ports; port++) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct mdio_device *pcs; - struct lynx_pcs *lynx; + struct phylink_pcs *phylink_pcs; + struct mdio_device *mdio_device; if (dsa_is_unused_port(felix->ds, port)) continue; @@ -1100,17 +1097,17 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) continue; - pcs = mdio_device_create(felix->imdio, port); - if (IS_ERR(pcs)) + mdio_device = mdio_device_create(felix->imdio, port); + if (IS_ERR(mdio_device)) continue; - lynx = lynx_pcs_create(pcs); - if (!lynx) { - mdio_device_free(pcs); + phylink_pcs = lynx_pcs_create(mdio_device); + if (!phylink_pcs) { + mdio_device_free(mdio_device); continue; } - felix->pcs[port] = lynx; + felix->pcs[port] = phylink_pcs; dev_info(dev, "Found PCS at internal MDIO address %d\n", port); } @@ -1124,13 +1121,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) int port; for (port = 0; port < ocelot->num_phys_ports; port++) { - struct lynx_pcs *pcs = 
felix->pcs[port];
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
- if (!pcs)
+ if (!phylink_pcs)
 continue;
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
 }
 mdiobus_unregister(felix->imdio);
}
@@ -1344,6 +1343,877 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
 }
}
+#define VSC9959_PSFP_SFID_MAX 175
+#define VSC9959_PSFP_GATE_ID_MAX 183
+#define VSC9959_PSFP_POLICER_BASE 63
+#define VSC9959_PSFP_POLICER_MAX 383
+#define VSC9959_PSFP_GATE_LIST_NUM 4
+#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
+
+struct felix_stream {
+ struct list_head list;
+ unsigned long id;
+ bool dummy;
+ int ports;
+ int port;
+ u8 dmac[ETH_ALEN];
+ u16 vid;
+ s8 prio;
+ u8 sfid_valid;
+ u8 ssid_valid;
+ u32 sfid;
+ u32 ssid;
+};
+
+struct felix_stream_filter {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+ u8 enable;
+ int portmask;
+ u8 sg_valid;
+ u32 sgid;
+ u8 fm_valid;
+ u32 fmid;
+ u8 prio_valid;
+ u8 prio;
+ u32 maxsdu;
+};
+
+struct felix_stream_filter_counters {
+ u32 match;
+ u32 not_pass_gate;
+ u32 not_pass_sdu;
+ u32 red;
+};
+
+struct felix_stream_gate {
+ u32 index;
+ u8 enable;
+ u8 ipv_valid;
+ u8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletime_ext;
+ u32 num_entries;
+ struct action_gate_entry entries[];
+};
+
+struct felix_stream_gate_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+};
+
+static int vsc9959_stream_identify(struct flow_cls_offload *f,
+ struct felix_stream *stream)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(stream->dmac, match.key->dst);
+ if (!is_zero_ether_addr(match.mask->src))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority)
+ stream->prio = match.key->vlan_priority;
+ else
+ stream->prio = -1;
+
+ if (!match.mask->vlan_id)
+ return -EOPNOTSUPP;
+ stream->vid = match.key->vlan_id;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ stream->id = f->cookie;
+
+ return 0;
+}
+
+static int vsc9959_mact_stream_set(struct ocelot *ocelot,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ enum macaccess_entry_type type;
+ int ret, sfid, ssid;
+ u32 vid, dst_idx;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, stream->dmac);
+ vid = stream->vid;
+
+ /* Stream identification doesn't support adding a stream with a
+ * nonexistent MAC (one that has not been learned into the MAC table).
+ */
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret) {
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
+ return -EOPNOTSUPP;
+ }
+
+ if ((stream->sfid_valid || stream->ssid_valid) &&
+ type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
+
+ sfid = stream->sfid_valid ? stream->sfid : -1;
+ ssid = stream->ssid_valid ?
stream->ssid : -1; + + ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type, + sfid, ssid); + + return ret; +} + +static struct felix_stream * +vsc9959_stream_table_lookup(struct list_head *stream_list, + struct felix_stream *stream) +{ + struct felix_stream *tmp; + + list_for_each_entry(tmp, stream_list, list) + if (ether_addr_equal(tmp->dmac, stream->dmac) && + tmp->vid == stream->vid) + return tmp; + + return NULL; +} + +static int vsc9959_stream_table_add(struct ocelot *ocelot, + struct list_head *stream_list, + struct felix_stream *stream, + struct netlink_ext_ack *extack) +{ + struct felix_stream *stream_entry; + int ret; + + stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL); + if (!stream_entry) + return -ENOMEM; + + if (!stream->dummy) { + ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack); + if (ret) { + kfree(stream_entry); + return ret; + } + } + + list_add_tail(&stream_entry->list, stream_list); + + return 0; +} + +static struct felix_stream * +vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id) +{ + struct felix_stream *tmp; + + list_for_each_entry(tmp, stream_list, list) + if (tmp->id == id) + return tmp; + + return NULL; +} + +static void vsc9959_stream_table_del(struct ocelot *ocelot, + struct felix_stream *stream) +{ + if (!stream->dummy) + vsc9959_mact_stream_set(ocelot, stream, NULL); + + list_del(&stream->list); + kfree(stream); +} + +static u32 vsc9959_sfi_access_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS); +} + +static int vsc9959_psfp_sfi_set(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + u32 val; + + if (sfi->index > VSC9959_PSFP_SFID_MAX) + return -EINVAL; + + if (!sfi->enable) { + ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index), + ANA_TABLES_SFIDTIDX); + + val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE); + ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); + } + + if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX || + sfi->fmid > VSC9959_PSFP_POLICER_MAX) + return -EINVAL; + + ocelot_write(ocelot, + (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) | + ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) | + (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) | + ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) | + ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index), + ANA_TABLES_SFIDTIDX); + + ocelot_write(ocelot, + (sfi->prio_valid ? 
ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) | + ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) | + ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) | + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE), + ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); +} + +static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports) +{ + u32 val; + + ocelot_rmw(ocelot, + ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid), + ANA_TABLES_SFIDTIDX_SFID_INDEX_M, + ANA_TABLES_SFIDTIDX); + + ocelot_write(ocelot, + ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) | + ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA, + ANA_TABLES_SFID_MASK); + + ocelot_rmw(ocelot, + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE), + ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M, + ANA_TABLES_SFIDACCESS); + + return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val, + (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)), + 10, 100000); +} + +static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot, + struct felix_stream_filter *sfi, + struct list_head *pos) +{ + struct felix_stream_filter *sfi_entry; + int ret; + + sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL); + if (!sfi_entry) + return -ENOMEM; + + refcount_set(&sfi_entry->refcount, 1); + + ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry); + if (ret) { + kfree(sfi_entry); + return ret; + } + + vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask); + + list_add(&sfi_entry->list, pos); + + return 0; +} + +static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot, + struct felix_stream_filter *sfi) +{ + struct list_head *pos, *q, *last; + struct felix_stream_filter *tmp; + struct ocelot_psfp_list *psfp; + u32 insert = 0; + + psfp = &ocelot->psfp; + last = &psfp->sfi_list; + + list_for_each_safe(pos, q, &psfp->sfi_list) { + tmp = list_entry(pos, struct felix_stream_filter, list); + if (sfi->sg_valid == tmp->sg_valid && + sfi->fm_valid == tmp->fm_valid && + sfi->portmask == tmp->portmask && + tmp->sgid == sfi->sgid && + tmp->fmid == sfi->fmid) { + sfi->index = tmp->index; + refcount_inc(&tmp->refcount); + return 0; + } + /* Make sure that the index is increasing in order. */ + if (tmp->index == insert) { + last = pos; + insert++; + } + } + sfi->index = insert; + + return vsc9959_psfp_sfi_list_add(ocelot, sfi, last); +} + +static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot, + struct felix_stream_filter *sfi, + struct felix_stream_filter *sfi2) +{ + struct felix_stream_filter *tmp; + struct list_head *pos, *q, *last; + struct ocelot_psfp_list *psfp; + u32 insert = 0; + int ret; + + psfp = &ocelot->psfp; + last = &psfp->sfi_list; + + list_for_each_safe(pos, q, &psfp->sfi_list) { + tmp = list_entry(pos, struct felix_stream_filter, list); + /* Make sure that the index is increasing in order. 
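Two consecutive free indices are needed here (one stream filter per ingress port of the same stream), hence the insert + 2 check below.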
*/ + if (tmp->index >= insert + 2) + break; + + insert = tmp->index + 1; + last = pos; + } + sfi->index = insert; + + ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last); + if (ret) + return ret; + + sfi2->index = insert + 1; + + return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next); +} + +static struct felix_stream_filter * +vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index) +{ + struct felix_stream_filter *tmp; + + list_for_each_entry(tmp, sfi_list, list) + if (tmp->index == index) + return tmp; + + return NULL; +} + +static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index) +{ + struct felix_stream_filter *tmp, *n; + struct ocelot_psfp_list *psfp; + u8 z; + + psfp = &ocelot->psfp; + + list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list) + if (tmp->index == index) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + tmp->enable = 0; + vsc9959_psfp_sfi_set(ocelot, tmp); + list_del(&tmp->list); + kfree(tmp); + } + break; + } +} + +static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry, + struct felix_stream_gate *sgi) +{ + sgi->index = entry->hw_index; + sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1; + sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0; + sgi->basetime = entry->gate.basetime; + sgi->cycletime = entry->gate.cycletime; + sgi->num_entries = entry->gate.num_entries; + sgi->enable = 1; + + memcpy(sgi->entries, entry->gate.entries, + entry->gate.num_entries * sizeof(struct action_gate_entry)); +} + +static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL); +} + +static int vsc9959_psfp_sgi_set(struct ocelot *ocelot, + struct felix_stream_gate *sgi) +{ + struct action_gate_entry *e; + struct timespec64 base_ts; + u32 interval_sum = 0; + u32 val; + int i; + + if (sgi->index > VSC9959_PSFP_GATE_ID_MAX) + return -EINVAL; + + ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index), + ANA_SG_ACCESS_CTRL); + + if (!sgi->enable) { + ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE, + ANA_SG_CONFIG_REG_3_INIT_GATE_STATE | + ANA_SG_CONFIG_REG_3_GATE_ENABLE, + ANA_SG_CONFIG_REG_3); + + return 0; + } + + if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN || + sgi->cycletime > NSEC_PER_SEC) + return -EINVAL; + + if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM) + return -EINVAL; + + vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts); + ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1); + val = lower_32_bits(base_ts.tv_sec); + ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2); + + val = upper_32_bits(base_ts.tv_sec); + ocelot_write(ocelot, + (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) | + ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) | + ANA_SG_CONFIG_REG_3_GATE_ENABLE | + ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) | + ANA_SG_CONFIG_REG_3_INIT_GATE_STATE | + ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val), + ANA_SG_CONFIG_REG_3); + + ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4); + + e = sgi->entries; + for (i = 0; i < sgi->num_entries; i++) { + u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8); + + ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) | + (e[i].gate_state ? 
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0), + ANA_SG_GCL_GS_CONFIG, i); + + interval_sum += e[i].interval; + ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i); + } + + ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE, + ANA_SG_ACCESS_CTRL_CONFIG_CHANGE, + ANA_SG_ACCESS_CTRL); + + return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val, + (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)), + 10, 100000); +} + +static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot, + struct felix_stream_gate *sgi) +{ + struct felix_stream_gate_entry *tmp; + struct ocelot_psfp_list *psfp; + int ret; + + psfp = &ocelot->psfp; + + list_for_each_entry(tmp, &psfp->sgi_list, list) + if (tmp->index == sgi->index) { + refcount_inc(&tmp->refcount); + return 0; + } + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = vsc9959_psfp_sgi_set(ocelot, sgi); + if (ret) { + kfree(tmp); + return ret; + } + + tmp->index = sgi->index; + refcount_set(&tmp->refcount, 1); + list_add_tail(&tmp->list, &psfp->sgi_list); + + return 0; +} + +static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot, + u32 index) +{ + struct felix_stream_gate_entry *tmp, *n; + struct felix_stream_gate sgi = {0}; + struct ocelot_psfp_list *psfp; + u8 z; + + psfp = &ocelot->psfp; + + list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list) + if (tmp->index == index) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + sgi.index = index; + sgi.enable = 0; + vsc9959_psfp_sgi_set(ocelot, &sgi); + list_del(&tmp->list); + kfree(tmp); + } + break; + } +} + +static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index, + struct felix_stream_filter_counters *counters) +{ + ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index), + SYS_STAT_CFG_STAT_VIEW_M, + SYS_STAT_CFG); + + counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200); + counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201); + counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202); + counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203); + + /* Clear the PSFP counter. 
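The counters are cleared right after being read, so each query returns the delta since the previous one.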
*/
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(index) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+}
+
+static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct felix_stream_filter old_sfi, *sfi_entry;
+ struct felix_stream_filter sfi = {0};
+ const struct flow_action_entry *a;
+ struct felix_stream *stream_entry;
+ struct felix_stream stream = {0};
+ struct felix_stream_gate *sgi;
+ struct ocelot_psfp_list *psfp;
+ struct ocelot_policer pol;
+ int ret, i, size;
+ u64 rate, burst;
+ u32 index;
+
+ psfp = &ocelot->psfp;
+
+ ret = vsc9959_stream_identify(f, &stream);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Can only match on VID, PCP and dest MAC");
+ return ret;
+ }
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_GATE:
+ size = struct_size(sgi, entries, a->gate.num_entries);
+ sgi = kzalloc(size, GFP_KERNEL);
+ if (!sgi) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ vsc9959_psfp_parse_gate(a, sgi);
+ ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ if (ret) {
+ kfree(sgi);
+ goto err;
+ }
+ sfi.sg_valid = 1;
+ sfi.sgid = sgi->index;
+ kfree(sgi);
+ break;
+ case FLOW_ACTION_POLICE:
+ index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
+ if (index > VSC9959_PSFP_POLICER_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ rate = a->police.rate_bytes_ps;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ pol = (struct ocelot_policer) {
+ .burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
+ .rate = div_u64(rate, 1000) * 8,
+ };
+ ret = ocelot_vcap_policer_add(ocelot, index, &pol);
+ if (ret)
+ goto err;
+
+ sfi.fm_valid = 1;
+ sfi.fmid = index;
+ sfi.maxsdu = a->police.mtu;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ stream.ports = BIT(port);
+ stream.port = port;
+
+ sfi.portmask = stream.ports;
+ sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
+ sfi.prio = (sfi.prio_valid ? stream.prio : 0);
+ sfi.enable = 1;
+
+ /* Check if stream is set. 
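If it is, the stream already exists on another port: the two stream filters must then occupy consecutive SFIDs, and only one of the two stream table entries may own the MAC table binding; the other is kept as a dummy.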
*/
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
+ if (stream_entry) {
+ if (stream_entry->ports & BIT(port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is already added on this port");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (stream_entry->ports != BIT(stream_entry->port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is already added on two ports");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ stream_entry->ports |= BIT(port);
+ stream.ports = stream_entry->ports;
+
+ sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
+ stream_entry->sfid);
+ memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
+
+ old_sfi.portmask = stream_entry->ports;
+ sfi.portmask = stream.ports;
+
+ if (stream_entry->port > port) {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
+ &old_sfi);
+ stream_entry->dummy = true;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
+ &sfi);
+ stream.dummy = true;
+ }
+ if (ret)
+ goto err;
+
+ stream_entry->sfid = old_sfi.index;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
+ if (ret)
+ goto err;
+ }
+
+ stream.sfid = sfi.index;
+ stream.sfid_valid = 1;
+ ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
+ &stream, extack);
+ if (ret) {
+ vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (sfi.sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
+
+ if (sfi.fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi.fmid);
+
+ return ret;
+}
+
+static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
+ struct flow_cls_offload *f)
+{
+ struct felix_stream *stream, tmp, *stream_entry;
+ struct felix_stream_filter *sfi;
+ struct ocelot_psfp_list *psfp;
+
+ psfp = &ocelot->psfp;
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOENT;
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -ENOENT;
+
+ if (sfi->sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
+
+ if (sfi->fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi->fmid);
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
+
+ memcpy(&tmp, stream, sizeof(tmp));
+
+ stream->sfid_valid = 0;
+ vsc9959_stream_table_del(ocelot, stream);
+
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
+ if (stream_entry) {
+ stream_entry->ports = BIT(stream_entry->port);
+ if (stream_entry->dummy) {
+ stream_entry->dummy = false;
+ vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
+ }
+ vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
+ stream_entry->ports);
+ }
+
+ return 0;
+}
+
+static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
+ struct flow_cls_offload *f,
+ struct flow_stats *stats)
+{
+ struct felix_stream_filter_counters counters;
+ struct ocelot_psfp_list *psfp;
+ struct felix_stream *stream;
+
+ psfp = &ocelot->psfp;
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOENT;
+
+ vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+
+ stats->pkts = counters.match;
+ stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
+ counters.red;
+
+ return 0;
+}
+
+static void vsc9959_psfp_init(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+
+ INIT_LIST_HEAD(&psfp->stream_list);
+ INIT_LIST_HEAD(&psfp->sfi_list);
+ INIT_LIST_HEAD(&psfp->sgi_list);
+}
+
+/* When using cut-through forwarding and the egress port runs at a higher data
+ * rate than the ingress port, the packet currently under 
transmission would + * suffer an underrun since it would be transmitted faster than it is received. + * The Felix switch implementation of cut-through forwarding does not check in + * hardware whether this condition is satisfied or not, so we must restrict the + * list of ports that have cut-through forwarding enabled on egress to only be + * the ports operating at the lowest link speed within their respective + * forwarding domain. + */ +static void vsc9959_cut_through_fwd(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_switch *ds = felix->ds; + int port, other_port; + + lockdep_assert_held(&ocelot->fwd_domain_lock); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int min_speed = ocelot_port->speed; + unsigned long mask = 0; + u32 tmp, val = 0; + + /* Disable cut-through on ports that are down */ + if (ocelot_port->speed <= 0) + goto set; + + if (dsa_is_cpu_port(ds, port)) { + /* Ocelot switches forward from the NPI port towards + * any port, regardless of it being in the NPI port's + * forwarding domain or not. + */ + mask = dsa_user_ports(ds); + } else { + mask = ocelot_get_bridge_fwd_mask(ocelot, port); + mask &= ~BIT(port); + if (ocelot->npi >= 0) + mask |= BIT(ocelot->npi); + else + mask |= ocelot_get_dsa_8021q_cpu_mask(ocelot); + } + + /* Calculate the minimum link speed, among the ports that are + * up, of this source port's forwarding domain. + */ + for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) { + struct ocelot_port *other_ocelot_port; + + other_ocelot_port = ocelot->ports[other_port]; + if (other_ocelot_port->speed <= 0) + continue; + + if (min_speed > other_ocelot_port->speed) + min_speed = other_ocelot_port->speed; + } + + /* Enable cut-through forwarding for all traffic classes. */ + if (ocelot_port->speed == min_speed) + val = GENMASK(7, 0); + +set: + tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port); + if (tmp == val) + continue; + + dev_dbg(ocelot->dev, + "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n", + port, mask, ocelot_port->speed, min_speed, + val ? 
"enabling" : "disabling"); + + ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port); + } +} + +static const struct ocelot_ops vsc9959_ops = { + .reset = vsc9959_reset, + .wm_enc = vsc9959_wm_enc, + .wm_dec = vsc9959_wm_dec, + .wm_stat = vsc9959_wm_stat, + .port_to_netdev = felix_port_to_netdev, + .netdev_to_port = felix_netdev_to_port, + .psfp_init = vsc9959_psfp_init, + .psfp_filter_add = vsc9959_psfp_filter_add, + .psfp_filter_del = vsc9959_psfp_filter_del, + .psfp_stats_get = vsc9959_psfp_stats_get, + .cut_through_fwd = vsc9959_cut_through_fwd, +}; + static const struct felix_info felix_info_vsc9959 = { .target_io_res = vsc9959_target_io_res, .port_io_res = vsc9959_port_io_res, @@ -1354,11 +2224,13 @@ static const struct felix_info felix_info_vsc9959 = { .stats_layout = vsc9959_stats_layout, .num_stats = ARRAY_SIZE(vsc9959_stats_layout), .vcap = vsc9959_vcap_props, + .vcap_pol_base = VSC9959_VCAP_POLICER_BASE, + .vcap_pol_max = VSC9959_VCAP_POLICER_MAX, + .vcap_pol_base2 = 0, + .vcap_pol_max2 = 0, .num_mact_rows = 2048, .num_ports = 6, .num_tx_queues = OCELOT_NUM_TC, - .switch_pci_bar = 4, - .imdio_pci_bar = 0, .quirk_no_xtr_irq = true, .ptp_caps = &vsc9959_ptp_caps, .mdio_bus_alloc = vsc9959_mdio_bus_alloc, @@ -1367,6 +2239,7 @@ static const struct felix_info felix_info_vsc9959 = { .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, + .init_regmap = ocelot_regmap_init, }; static irqreturn_t felix_irq_handler(int irq, void *data) @@ -1417,10 +2290,8 @@ static int felix_pci_probe(struct pci_dev *pdev, ocelot->dev = &pdev->dev; ocelot->num_flooding_pgids = OCELOT_NUM_TC; felix->info = &felix_info_vsc9959; - felix->switch_base = pci_resource_start(pdev, - felix->info->switch_pci_bar); - felix->imdio_base = pci_resource_start(pdev, - felix->info->imdio_pci_bar); + felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR); + felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR); pci_set_master(pdev); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 92eae63150ea..8c1c9da61602 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -6,18 +6,18 @@ #include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot_sys.h> #include <soc/mscc/ocelot.h> +#include <linux/mdio/mdio-mscc-miim.h> +#include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/pcs-lynx.h> #include <linux/dsa/ocelot.h> #include <linux/iopoll.h> #include "felix.h" -#define MSCC_MIIM_CMD_OPR_WRITE BIT(1) -#define MSCC_MIIM_CMD_OPR_READ BIT(2) -#define MSCC_MIIM_CMD_WRDATA_SHIFT 4 -#define MSCC_MIIM_CMD_REGAD_SHIFT 20 -#define MSCC_MIIM_CMD_PHYAD_SHIFT 25 -#define MSCC_MIIM_CMD_VLD BIT(31) +#define VSC9953_VCAP_POLICER_BASE 11 +#define VSC9953_VCAP_POLICER_MAX 31 +#define VSC9953_VCAP_POLICER_BASE2 120 +#define VSC9953_VCAP_POLICER_MAX2 161 static const u32 vsc9953_ana_regmap[] = { REG(ANA_ADVLEARN, 0x00b500), @@ -857,7 +857,6 @@ static struct vcap_props vsc9953_vcap_props[] = { #define VSC9953_INIT_TIMEOUT 50000 #define VSC9953_GCB_RST_SLEEP 100 #define VSC9953_SYS_RAMINIT_SLEEP 80 -#define VCS9953_MII_TIMEOUT 10000 static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot) { @@ -877,82 +876,6 @@ static int vsc9953_sys_ram_init_status(struct ocelot *ocelot) return val; } -static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot) -{ - int val; - - ocelot_field_read(ocelot, 
GCB_MIIM_MII_STATUS_PENDING, &val); - - return val; -} - -static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot) -{ - int val; - - ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val); - - return val; -} - -static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum, - u16 value) -{ - struct ocelot *ocelot = bus->priv; - int err, cmd, val; - - /* Wait while MIIM controller becomes idle */ - err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot, - val, !val, 10, VCS9953_MII_TIMEOUT); - if (err) { - dev_err(ocelot->dev, "MDIO write: pending timeout\n"); - goto out; - } - - cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | - (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | - (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | - MSCC_MIIM_CMD_OPR_WRITE; - - ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD); - -out: - return err; -} - -static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum) -{ - struct ocelot *ocelot = bus->priv; - int err, cmd, val; - - /* Wait until MIIM controller becomes idle */ - err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot, - val, !val, 10, VCS9953_MII_TIMEOUT); - if (err) { - dev_err(ocelot->dev, "MDIO read: pending timeout\n"); - goto out; - } - - /* Write the MIIM COMMAND register */ - cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | - (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ; - - ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD); - - /* Wait while read operation via the MIIM controller is in progress */ - err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot, - val, !val, 10, VCS9953_MII_TIMEOUT); - if (err) { - dev_err(ocelot->dev, "MDIO read: busy timeout\n"); - goto out; - } - - val = ocelot_read(ocelot, GCB_MIIM_MII_DATA); - - err = val & 0xFFFF; -out: - return err; -} /* CORE_ENA is in SYS:SYSTEM:RESET_CFG * MEM_INIT is in SYS:SYSTEM:RESET_CFG @@ -1089,26 +1012,24 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) int rc; felix->pcs = devm_kcalloc(dev, felix->info->num_ports, - sizeof(struct phy_device *), + sizeof(struct phylink_pcs *), GFP_KERNEL); if (!felix->pcs) { dev_err(dev, "failed to allocate array for PCS PHYs\n"); return -ENOMEM; } - bus = devm_mdiobus_alloc(dev); - if (!bus) - return -ENOMEM; + rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus", + ocelot->targets[GCB], + ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK]); - bus->name = "VSC9953 internal MDIO bus"; - bus->read = vsc9953_mdio_read; - bus->write = vsc9953_mdio_write; - bus->parent = dev; - bus->priv = ocelot; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); + if (rc) { + dev_err(dev, "failed to setup MDIO bus\n"); + return rc; + } /* Needed in order to initialize the bus mutex lock */ - rc = mdiobus_register(bus); + rc = of_mdiobus_register(bus, NULL); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); return rc; @@ -1118,9 +1039,9 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) for (port = 0; port < felix->info->num_ports; port++) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct phylink_pcs *phylink_pcs; + struct mdio_device *mdio_device; int addr = port + 4; - struct mdio_device *pcs; - struct lynx_pcs *lynx; if (dsa_is_unused_port(felix->ds, port)) continue; @@ -1128,17 +1049,17 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) continue; - pcs = mdio_device_create(felix->imdio, addr); - if (IS_ERR(pcs)) + mdio_device = 
mdio_device_create(felix->imdio, addr); + if (IS_ERR(mdio_device)) continue; - lynx = lynx_pcs_create(pcs); - if (!lynx) { - mdio_device_free(pcs); + phylink_pcs = lynx_pcs_create(mdio_device); + if (!phylink_pcs) { + mdio_device_free(mdio_device); continue; } - felix->pcs[port] = lynx; + felix->pcs[port] = phylink_pcs; dev_info(dev, "Found PCS at internal MDIO address %d\n", addr); } @@ -1152,13 +1073,15 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot) int port; for (port = 0; port < ocelot->num_phys_ports; port++) { - struct lynx_pcs *pcs = felix->pcs[port]; + struct phylink_pcs *phylink_pcs = felix->pcs[port]; + struct mdio_device *mdio_device; - if (!pcs) + if (!phylink_pcs) continue; - mdio_device_free(pcs->mdio); - lynx_pcs_destroy(pcs); + mdio_device = lynx_get_mdio_device(phylink_pcs); + mdio_device_free(mdio_device); + lynx_pcs_destroy(phylink_pcs); } mdiobus_unregister(felix->imdio); } @@ -1172,6 +1095,10 @@ static const struct felix_info seville_info_vsc9953 = { .stats_layout = vsc9953_stats_layout, .num_stats = ARRAY_SIZE(vsc9953_stats_layout), .vcap = vsc9953_vcap_props, + .vcap_pol_base = VSC9953_VCAP_POLICER_BASE, + .vcap_pol_max = VSC9953_VCAP_POLICER_MAX, + .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2, + .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2, .num_mact_rows = 2048, .num_ports = 10, .num_tx_queues = OCELOT_NUM_TC, @@ -1179,6 +1106,7 @@ static const struct felix_info seville_info_vsc9953 = { .mdio_bus_free = vsc9953_mdio_bus_free, .phylink_validate = vsc9953_phylink_validate, .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode, + .init_regmap = ocelot_regmap_init, }; static int seville_probe(struct platform_device *pdev) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a429c9750add..039694518788 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -9,6 +9,8 @@ #include <linux/module.h> #include <linux/phy.h> #include <linux/netdevice.h> +#include <linux/bitfield.h> +#include <linux/regmap.h> #include <net/dsa.h> #include <linux/of_net.h> #include <linux/of_mdio.h> @@ -68,6 +70,8 @@ static const struct qca8k_mib_desc ar8327_mib[] = { MIB_DESC(1, 0x9c, "TxExcDefer"), MIB_DESC(1, 0xa0, "TxDefer"), MIB_DESC(1, 0xa4, "TxLateCol"), + MIB_DESC(1, 0xa8, "RXUnicast"), + MIB_DESC(1, 0xac, "TXUnicast"), }; /* The 32bit switch registers are accessed indirectly. 
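A page register selects the upper bits of the register address, so an access may need an extra MDIO write to switch pages.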
To achieve this we need @@ -151,6 +155,25 @@ qca8k_set_page(struct mii_bus *bus, u16 page) static int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val) { + return regmap_read(priv->regmap, reg, val); +} + +static int +qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) +{ + return regmap_write(priv->regmap, reg, val); +} + +static int +qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) +{ + return regmap_update_bits(priv->regmap, reg, mask, write_val); +} + +static int +qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) +{ + struct qca8k_priv *priv = (struct qca8k_priv *)ctx; struct mii_bus *bus = priv->bus; u16 r1, r2, page; int ret; @@ -171,8 +194,9 @@ exit: } static int -qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val) +qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) { + struct qca8k_priv *priv = (struct qca8k_priv *)ctx; struct mii_bus *bus = priv->bus; u16 r1, r2, page; int ret; @@ -193,8 +217,9 @@ exit: } static int -qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val) +qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val) { + struct qca8k_priv *priv = (struct qca8k_priv *)ctx; struct mii_bus *bus = priv->bus; u16 r1, r2, page; u32 val; @@ -222,34 +247,6 @@ exit: return ret; } -static int -qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val) -{ - return qca8k_rmw(priv, reg, 0, val); -} - -static int -qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val) -{ - return qca8k_rmw(priv, reg, val, 0); -} - -static int -qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ctx; - - return qca8k_read(priv, reg, val); -} - -static int -qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ctx; - - return qca8k_write(priv, reg, val); -} - static const struct regmap_range qca8k_readable_ranges[] = { regmap_reg_range(0x0000, 0x00e4), /* Global control */ regmap_reg_range(0x0100, 0x0168), /* EEE control */ @@ -281,26 +278,19 @@ static struct regmap_config qca8k_regmap_config = { .max_register = 0x16ac, /* end MIB - Port6 range */ .reg_read = qca8k_regmap_read, .reg_write = qca8k_regmap_write, + .reg_update_bits = qca8k_regmap_update_bits, .rd_table = &qca8k_readable_table, + .disable_locking = true, /* Locking is handled by qca8k read/write */ + .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */ }; static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) { - int ret, ret1; u32 val; - ret = read_poll_timeout(qca8k_read, ret1, !(val & mask), - 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false, - priv, reg, &val); - - /* Check if qca8k_read has failed for a different reason - * before returning -ETIMEDOUT - */ - if (ret < 0 && ret1 < 0) - return ret1; - - return ret; + return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0, + QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC); } static int @@ -319,18 +309,18 @@ qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb) } /* vid - 83:72 */ - fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M; + fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]); /* aging - 67:64 */ - fdb->aging = reg[2] & QCA8K_ATU_STATUS_M; + fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]); /* portmask - 54:48 */ - fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M; + fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]); /* mac - 47:0 */ - fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff; - fdb->mac[1] = 
reg[1] & 0xff;
- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
- fdb->mac[5] = reg[0] & 0xff;
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
 return 0;
}
@@ -343,18 +333,18 @@ qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
 int i;
 /* vid - 83:72 */
- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
 /* aging - 67:64 */
- reg[2] |= aging & QCA8K_ATU_STATUS_M;
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
 /* portmask - 54:48 */
- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
 /* mac - 47:0 */
- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
- reg[1] |= mac[1];
- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
- reg[0] |= mac[5];
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
 /* load the array into the ARL table */
 for (i = 0; i < 3; i++)
@@ -372,7 +362,7 @@ qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
 reg |= cmd;
 if (port >= 0) {
 reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
 }
 /* Write the function register triggering the table access */
@@ -446,6 +436,81 @@ qca8k_fdb_flush(struct qca8k_priv *priv)
}
 static int
+qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exists. Delete it first */
+ if (!fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int
+qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist. Why delete? */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* Only port in the rule is this port. 
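The purge above already removed it.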
Don't re-insert */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int
 qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
{
 u32 reg;
@@ -454,7 +519,7 @@ qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
 /* Set the command and VLAN index */
 reg = QCA8K_VTU_FUNC1_BUSY;
 reg |= cmd;
- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
 /* Write the function register triggering the table access */
 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
@@ -500,13 +565,11 @@ qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
 if (ret < 0)
 goto out;
 reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
 if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
 else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
 if (ret)
@@ -534,15 +597,13 @@ qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
 if (ret < 0)
 goto out;
- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
 /* Check if we're the last member to be removed */
 del = true;
 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
 if ((reg & mask) != mask) {
 del = false;
@@ -571,7 +632,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
 int ret;
 mutex_lock(&priv->reg_mutex);
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
 if (ret)
 goto exit;
@@ -579,7 +640,7 @@ qca8k_mib_init(struct qca8k_priv *priv)
 if (ret)
 goto exit;
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
 if (ret)
 goto exit;
@@ -600,9 +661,9 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
 mask |= QCA8K_PORT_STATUS_LINK_AUTO;
 if (enable)
- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
 else
- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
 static u32
@@ -864,8 +925,8 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
 * a dt-overlay and driver reload changed the configuration
 */
- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_EN);
+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
}
 /* Check if the devicetree declares the port:phy mapping */
@@ -983,7 +1044,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
 u32 delay;
 /* We have 2 CPU ports. 
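Only port 0 and port 6 qualify; every other port is skipped below.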
Check them */ - for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) { + for (port = 0; port < QCA8K_NUM_PORTS; port++) { /* Skip every other port */ if (port != 0 && port != 6) continue; @@ -1014,7 +1075,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv) mode == PHY_INTERFACE_MODE_RGMII_TXID) delay = 1; - if (delay > QCA8K_MAX_DELAY) { + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) { dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value"); delay = 3; } @@ -1030,7 +1091,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv) mode == PHY_INTERFACE_MODE_RGMII_RXID) delay = 2; - if (delay > QCA8K_MAX_DELAY) { + if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) { dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value"); delay = 3; } @@ -1089,14 +1150,6 @@ qca8k_setup(struct dsa_switch *ds) if (ret) return ret; - mutex_init(&priv->reg_mutex); - - /* Start by setting up the register mapping */ - priv->regmap = devm_regmap_init(ds->dev, NULL, priv, - &qca8k_regmap_config); - if (IS_ERR(priv->regmap)) - dev_warn(priv->dev, "regmap initialization failed"); - ret = qca8k_setup_mdio_bus(priv); if (ret) return ret; @@ -1110,16 +1163,16 @@ qca8k_setup(struct dsa_switch *ds) return ret; /* Make sure MAC06 is disabled */ - ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL, - QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); + ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL, + QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN); if (ret) { dev_err(priv->dev, "failed disabling MAC06 exchange"); return ret; } /* Enable CPU Port */ - ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, - QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); + ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); if (ret) { dev_err(priv->dev, "failed enabling CPU port"); return ret; @@ -1141,8 +1194,8 @@ qca8k_setup(struct dsa_switch *ds) /* Enable QCA header mode on all cpu ports */ if (dsa_is_cpu_port(ds, i)) { ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i), - QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S | - QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S); + FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) | + FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL)); if (ret) { dev_err(priv->dev, "failed enabling QCA header mode"); return ret; @@ -1159,10 +1212,10 @@ qca8k_setup(struct dsa_switch *ds) * for igmp, unknown, multicast and broadcast packet */ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, - BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S | - BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S | - BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S | - BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) | + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) | + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) | + FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port))); if (ret) return ret; @@ -1180,8 +1233,6 @@ qca8k_setup(struct dsa_switch *ds) /* Individual user ports get connected to CPU port only */ if (dsa_is_user_port(ds, i)) { - int shift = 16 * (i % 2); - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); @@ -1189,8 +1240,8 @@ qca8k_setup(struct dsa_switch *ds) return ret; /* Enable ARP Auto-learning by default */ - ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i), - QCA8K_PORT_LOOKUP_LEARN); 
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
 if (ret)
 return ret;
@@ -1198,8 +1249,8 @@
 * default egress vid */
 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- 0xfff << shift,
- QCA8K_PORT_VID_DEF << shift);
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
 if (ret)
 return ret;
@@ -1246,7 +1297,7 @@ qca8k_setup(struct dsa_switch *ds)
 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
 QCA8K_PORT_HOL_CTRL1_WRED_EN;
 qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF |
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
 QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
 QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
 QCA8K_PORT_HOL_CTRL1_WRED_EN,
@@ -1256,8 +1307,12 @@ qca8k_setup(struct dsa_switch *ds)
 /* Set initial MTU for every port.
 * We only have a general MTU setting, so track
 * every port and set the max across all ports.
+ * Set per port MTU to 1500 as the MTU change function
+ * will add the overhead and if it's set to 1518 then it
+ * will apply the overhead again and we will end up with
+ * MTU of 1536 instead of 1518
 */
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
+ priv->port_mtu[i] = ETH_DATA_LEN;
 }
 /* Special GLOBAL_FC_THRESH values are needed for ar8327 switch */
@@ -1265,8 +1320,8 @@ qca8k_setup(struct dsa_switch *ds)
 mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
 QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
 qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
 mask);
 }
@@ -1281,6 +1336,13 @@ qca8k_setup(struct dsa_switch *ds)
 /* We don't have interrupts for link changes, so we need to poll */
 ds->pcs_poll = true;
+ /* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
 return 0;
}
@@ -1433,6 +1495,12 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
 qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+ /* The original code reports port instability when SGMII is used
+ * without delays set. Apply the advised values here or take them from DT.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
 /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
 * falling edge is set writing in the PORT0 PAD reg
 */
@@ -1455,12 +1523,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
 QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
 val);
- /* From original code is reported port instability as SGMII also
- * require delay set. Apply advised values here or take them from DT. 
- */ - if (state->interface == PHY_INTERFACE_MODE_SGMII) - qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg); - break; default: dev_err(ds->dev, "xMII mode %s not supported for port %d\n", @@ -1627,12 +1689,16 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, static void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { + const struct qca8k_match_data *match_data; + struct qca8k_priv *priv = ds->priv; int i; if (stringset != ETH_SS_STATS) return; - for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) + match_data = of_device_get_match_data(priv->dev); + + for (i = 0; i < match_data->mib_count; i++) strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, ETH_GSTRING_LEN); } @@ -1642,12 +1708,15 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + const struct qca8k_match_data *match_data; const struct qca8k_mib_desc *mib; u32 reg, i, val; u32 hi = 0; int ret; - for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) { + match_data = of_device_get_match_data(priv->dev); + + for (i = 0; i < match_data->mib_count; i++) { mib = &ar8327_mib[i]; reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset; @@ -1670,10 +1739,15 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, static int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) { + const struct qca8k_match_data *match_data; + struct qca8k_priv *priv = ds->priv; + if (sset != ETH_SS_STATS) return 0; - return ARRAY_SIZE(ar8327_mib); + match_data = of_device_get_match_data(priv->dev); + + return match_data->mib_count; } static int @@ -1736,8 +1810,9 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); } -static int -qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) +static int qca8k_port_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int port_mask, cpu_port; @@ -1749,14 +1824,14 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) for (i = 0; i < QCA8K_NUM_PORTS; i++) { if (dsa_is_cpu_port(ds, i)) continue; - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Add this port to the portvlan mask of the other ports * in the bridge */ - ret = qca8k_reg_set(priv, - QCA8K_PORT_LOOKUP_CTRL(i), - BIT(port)); + ret = regmap_set_bits(priv->regmap, + QCA8K_PORT_LOOKUP_CTRL(i), + BIT(port)); if (ret) return ret; if (i != port) @@ -1770,8 +1845,8 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) return ret; } -static void -qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) +static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int cpu_port, i; @@ -1781,14 +1856,14 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br) for (i = 0; i < QCA8K_NUM_PORTS; i++) { if (dsa_is_cpu_port(ds, i)) continue; - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Remove this port to the portvlan mask of the other ports * in the bridge */ - qca8k_reg_clear(priv, - QCA8K_PORT_LOOKUP_CTRL(i), - BIT(port)); + regmap_clear_bits(priv->regmap, + QCA8K_PORT_LOOKUP_CTRL(i), + BIT(port)); 
}
 /* Set the cpu port to be the only one in the portvlan mask of
@@ -1798,6 +1873,36 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
 QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
+static void
+qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* AGE_TIME reg is set in 7s steps */
+ val = secs / 7;
+
+ /* Round a val of 0 up to 1 so that
+ * learning is not disabled
+ */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
+
 static int
 qca8k_port_enable(struct dsa_switch *ds, int port,
 struct phy_device *phy)
@@ -1904,6 +2009,121 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
 static int
+qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+static int
+qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+static int
+qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return an error otherwise.
+ * When no mirror port is set, the value is set to 0xF
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled. 
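Once both masks are empty, qca8k_port_mirror_del() sets the monitor port back to 0xF (no mirror port).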
+ */ + if (ingress) + priv->mirror_rx |= BIT(port); + else + priv->mirror_tx |= BIT(port); + + return 0; +} + +static void +qca8k_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct qca8k_priv *priv = ds->priv; + u32 reg, val; + int ret; + + if (mirror->ingress) { + reg = QCA8K_PORT_LOOKUP_CTRL(port); + val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; + } else { + reg = QCA8K_REG_PORT_HOL_CTRL1(port); + val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN; + } + + ret = regmap_clear_bits(priv->regmap, reg, val); + if (ret) + goto err; + + if (mirror->ingress) + priv->mirror_rx &= ~BIT(port); + else + priv->mirror_tx &= ~BIT(port); + + /* No port set to send packet to mirror port. Disable mirror port */ + if (!priv->mirror_rx && !priv->mirror_tx) { + val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, + QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val); + if (ret) + goto err; + } +err: + dev_err(priv->dev, "Failed to del mirror port from %d", port); +} + +static int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, struct netlink_ext_ack *extack) { @@ -1912,11 +2132,11 @@ qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, if (vlan_filtering) { ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), - QCA8K_PORT_LOOKUP_VLAN_MODE, + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); } else { ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), - QCA8K_PORT_LOOKUP_VLAN_MODE, + QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); } @@ -1940,10 +2160,9 @@ qca8k_port_vlan_add(struct dsa_switch *ds, int port, } if (pvid) { - int shift = 16 * (port % 2); - ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), - 0xfff << shift, vlan->vid << shift); + QCA8K_EGREES_VLAN_PORT_MASK(port), + QCA8K_EGREES_VLAN_PORT(port, vlan->vid)); if (ret) return ret; @@ -1992,12 +2211,185 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port, return DSA_TAG_PROTO_QCA; } +static bool +qca8k_lag_can_offload(struct dsa_switch *ds, + struct net_device *lag, + struct netdev_lag_upper_info *info) +{ + struct dsa_port *dp; + int id, members = 0; + + id = dsa_lag_id(ds->dst, lag); + if (id < 0 || id >= ds->num_lag_ids) + return false; + + dsa_lag_foreach_port(dp, ds->dst, lag) + /* Includes the port joining the LAG */ + members++; + + if (members > QCA8K_NUM_PORTS_FOR_LAG) + return false; + + if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) + return false; + + if (info->hash_type != NETDEV_LAG_HASH_L2 && + info->hash_type != NETDEV_LAG_HASH_L23) + return false; + + return true; +} + +static int +qca8k_lag_setup_hash(struct dsa_switch *ds, + struct net_device *lag, + struct netdev_lag_upper_info *info) +{ + struct qca8k_priv *priv = ds->priv; + bool unique_lag = true; + u32 hash = 0; + int i, id; + + id = dsa_lag_id(ds->dst, lag); + + switch (info->hash_type) { + case NETDEV_LAG_HASH_L23: + hash |= QCA8K_TRUNK_HASH_SIP_EN; + hash |= QCA8K_TRUNK_HASH_DIP_EN; + fallthrough; + case NETDEV_LAG_HASH_L2: + hash |= QCA8K_TRUNK_HASH_SA_EN; + hash |= QCA8K_TRUNK_HASH_DA_EN; + break; + default: /* We should NEVER reach this */ + return -EOPNOTSUPP; + } + + /* Check if we are the unique configured LAG */ + dsa_lags_foreach_id(i, ds->dst) + if (i != id && dsa_lag_dev(ds->dst, i)) { + unique_lag = false; + break; + } + + /* Hash Mode is global. Make sure the same Hash Mode + * is set to all the 4 possible lag. 
+ +static int +qca8k_lag_setup_hash(struct dsa_switch *ds, + struct net_device *lag, + struct netdev_lag_upper_info *info) +{ + struct qca8k_priv *priv = ds->priv; + bool unique_lag = true; + u32 hash = 0; + int i, id; + + id = dsa_lag_id(ds->dst, lag); + + switch (info->hash_type) { + case NETDEV_LAG_HASH_L23: + hash |= QCA8K_TRUNK_HASH_SIP_EN; + hash |= QCA8K_TRUNK_HASH_DIP_EN; + fallthrough; + case NETDEV_LAG_HASH_L2: + hash |= QCA8K_TRUNK_HASH_SA_EN; + hash |= QCA8K_TRUNK_HASH_DA_EN; + break; + default: /* We should NEVER reach this */ + return -EOPNOTSUPP; + } + + /* Check if we are the only configured LAG */ + dsa_lags_foreach_id(i, ds->dst) + if (i != id && dsa_lag_dev(ds->dst, i)) { + unique_lag = false; + break; + } + + /* The hash mode is global. Make sure the same hash mode + * is set for all 4 possible LAGs. + * If we are the only LAG we can set whatever hash + * mode we want. + * To change the hash mode, all existing LAGs must be + * removed and re-created with the new mode. + */ + if (unique_lag) { + priv->lag_hash_mode = hash; + } else if (priv->lag_hash_mode != hash) { + netdev_err(lag, "Error: Mismatched hash mode across different LAGs is not supported\n"); + return -EOPNOTSUPP; + } + + return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL, + QCA8K_TRUNK_HASH_MASK, hash); +} + +static int +qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port, + struct net_device *lag, bool delete) +{ + struct qca8k_priv *priv = ds->priv; + int ret, id, i; + u32 val; + + id = dsa_lag_id(ds->dst, lag); + + /* Read the current port members */ + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val); + if (ret) + return ret; + + /* Shift val to the correct trunk */ + val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id); + val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK; + if (delete) + val &= ~BIT(port); + else + val |= BIT(port); + + /* Update the port members. With an empty portmap, disable the trunk */ + ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, + QCA8K_REG_GOL_TRUNK_MEMBER(id) | + QCA8K_REG_GOL_TRUNK_EN(id), + !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) | + val << QCA8K_REG_GOL_TRUNK_SHIFT(id)); + + /* Search for an empty member slot when adding, or for the port's slot when deleting */ + for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) { + ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val); + if (ret) + return ret; + + val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i); + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK; + + if (delete) { + /* If the member is flagged as disabled, assume it is + * empty + */ + if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) + continue; + + val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK; + if (val != port) + continue; + } else { + /* If the member is flagged as enabled, assume it is + * already set + */ + if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK) + continue; + } + + /* We have found the member to add/remove */ + break; + } + + /* Set the port in the correct member slot, or disable the slot in delete mode */ + return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) | + QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i), + !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) | + port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i)); +} + +static int +qca8k_port_lag_join(struct dsa_switch *ds, int port, + struct net_device *lag, + struct netdev_lag_upper_info *info) +{ + int ret; + + if (!qca8k_lag_can_offload(ds, lag, info)) + return -EOPNOTSUPP; + + ret = qca8k_lag_setup_hash(ds, lag, info); + if (ret) + return ret; + + return qca8k_lag_refresh_portmap(ds, port, lag, false); +} + +static int +qca8k_port_lag_leave(struct dsa_switch *ds, int port, + struct net_device *lag) +{ + return qca8k_lag_refresh_portmap(ds, port, lag, true); +} + static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, .get_strings = qca8k_get_strings, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, + .set_ageing_time = qca8k_set_ageing_time, .get_mac_eee = qca8k_get_mac_eee, .set_mac_eee = qca8k_set_mac_eee, .port_enable = qca8k_port_enable, @@ -2007,9 +2399,14 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_stp_state_set = qca8k_port_stp_state_set, .port_bridge_join = qca8k_port_bridge_join, .port_bridge_leave = qca8k_port_bridge_leave, + .port_fast_age = qca8k_port_fast_age, .port_fdb_add = qca8k_port_fdb_add, .port_fdb_del = qca8k_port_fdb_del, .port_fdb_dump = qca8k_port_fdb_dump, + .port_mdb_add = qca8k_port_mdb_add, + .port_mdb_del = qca8k_port_mdb_del, + .port_mirror_add = qca8k_port_mirror_add, + .port_mirror_del = qca8k_port_mirror_del, .port_vlan_filtering = qca8k_port_vlan_filtering, .port_vlan_add = qca8k_port_vlan_add, .port_vlan_del = qca8k_port_vlan_del, @@ -2019,6 +2416,8 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .phylink_mac_link_down = qca8k_phylink_mac_link_down, .phylink_mac_link_up = qca8k_phylink_mac_link_up, .get_phy_flags = qca8k_get_phy_flags, + .port_lag_join = qca8k_port_lag_join, + .port_lag_leave = qca8k_port_lag_leave, };
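For reference, the .set_ageing_time hook registered in the ops structure above (qca8k_set_ageing_time, earlier in this diff) quantizes the requested ageing time into the 7-second units of the AGE_TIME register and clamps the result to at least 1 so that a short request never disables learning. A standalone sketch of just that arithmetic (the helper name is hypothetical; this is not driver code):

#include <stdio.h>

static unsigned int age_time_units(unsigned int msecs)
{
	unsigned int val = (msecs / 1000) / 7; /* the register counts 7 s steps */

	return val ? val : 1; /* writing 0 would disable learning */
}

int main(void)
{
	/* The bridge default of 300 s maps to 42 units; 5 s is clamped to 1 */
	printf("%u %u\n", age_time_units(300000), age_time_units(5000));
	return 0;
}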
static int qca8k_read_switch_id(struct qca8k_priv *priv) @@ -2037,7 +2436,7 @@ static int qca8k_read_switch_id(struct qca8k_priv *priv) if (ret < 0) return -ENODEV; - id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK); + id = QCA8K_MASK_CTRL_DEVICE_ID(val); if (id != data->id) { dev_err(priv->dev, "Switch id detected %x but expected %x\n", id, data->id); return -ENODEV; @@ -2046,7 +2445,7 @@ priv->switch_id = id; /* Save revision to communicate to the internal PHY driver */ - priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK); + priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val); return 0; } @@ -2081,6 +2480,14 @@ qca8k_sw_probe(struct mdio_device *mdiodev) gpiod_set_value_cansleep(priv->reset_gpio, 0); } + /* Start by setting up the register mapping */ + priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv, + &qca8k_regmap_config); + if (IS_ERR(priv->regmap)) { + dev_err(priv->dev, "regmap initialization failed\n"); + return PTR_ERR(priv->regmap); + } + /* Check the detected switch id */ ret = qca8k_read_switch_id(priv); if (ret) @@ -2169,14 +2576,17 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, static const struct qca8k_match_data qca8327 = { .id = QCA8K_ID_QCA8327, .reduced_package = true, + .mib_count = QCA8K_QCA832X_MIB_COUNT, }; static const struct qca8k_match_data qca8328 = { .id = QCA8K_ID_QCA8327, + .mib_count = QCA8K_QCA832X_MIB_COUNT, }; static const struct qca8k_match_data qca833x = { .id = QCA8K_ID_QCA8337, + .mib_count = QCA8K_QCA833X_MIB_COUNT, }; static const struct of_device_id qca8k_of_match[] = { diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 128b8cf85e08..ab4a417b25a9 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -15,12 +15,17 @@ #define QCA8K_NUM_PORTS 7 #define QCA8K_NUM_CPU_PORTS 2 #define QCA8K_MAX_MTU 9000 +#define QCA8K_NUM_LAGS 4 +#define QCA8K_NUM_PORTS_FOR_LAG 4 #define PHY_ID_QCA8327 0x004dd034 #define QCA8K_ID_QCA8327 0x12 #define PHY_ID_QCA8337 0x004dd036 #define QCA8K_ID_QCA8337 0x13 +#define QCA8K_QCA832X_MIB_COUNT 39 +#define QCA8K_QCA833X_MIB_COUNT 41 + #define QCA8K_BUSY_WAIT_TIMEOUT 2000 #define QCA8K_NUM_FDB_RECORDS 2048 @@ -30,9 +35,9 @@ /* Global control registers */ #define QCA8K_REG_MASK_CTRL 0x000 #define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0) -#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0) +#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x) #define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8) -#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8) +#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x) #define QCA8K_REG_PORT0_PAD_CTRL 0x004 #define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31) #define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19) @@ -41,12 +46,11
@@ #define QCA8K_REG_PORT6_PAD_CTRL 0x00c #define QCA8K_PORT_PAD_RGMII_EN BIT(26) #define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22) -#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22) +#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x) #define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20) -#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20) +#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x) #define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25) #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) -#define QCA8K_MAX_DELAY 3 #define QCA8K_PORT_PAD_SGMII_EN BIT(7) #define QCA8K_REG_PWS 0x010 #define QCA8K_PWS_POWER_ON_SEL BIT(31) @@ -68,10 +72,12 @@ #define QCA8K_MDIO_MASTER_READ BIT(27) #define QCA8K_MDIO_MASTER_WRITE 0 #define QCA8K_MDIO_MASTER_SUP_PRE BIT(26) -#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21) -#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16) -#define QCA8K_MDIO_MASTER_DATA(x) (x) +#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21) +#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x) +#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16) +#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x) #define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0) +#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x) #define QCA8K_MDIO_MASTER_MAX_PORTS 5 #define QCA8K_MDIO_MASTER_MAX_REG 32 #define QCA8K_GOL_MAC_ADDR0 0x60 @@ -93,9 +99,7 @@ #define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12) #define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4)) #define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2) -#define QCA8K_PORT_HDR_CTRL_RX_S 2 #define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0) -#define QCA8K_PORT_HDR_CTRL_TX_S 0 #define QCA8K_PORT_HDR_CTRL_ALL 2 #define QCA8K_PORT_HDR_CTRL_MGMT 1 #define QCA8K_PORT_HDR_CTRL_NONE 0 @@ -105,10 +109,11 @@ #define QCA8K_SGMII_EN_TX BIT(3) #define QCA8K_SGMII_EN_SD BIT(4) #define QCA8K_SGMII_CLK125M_DELAY BIT(7) -#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23)) -#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22) -#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) -#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) +#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22) +#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x) +#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0) +#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1) +#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2) /* MAC_PWR_SEL registers */ #define QCA8K_REG_MAC_PWR_SEL 0x0e4 @@ -119,102 +124,152 @@ #define QCA8K_REG_EEE_CTRL 0x100 #define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2) +/* TRUNK_HASH_EN registers */ +#define QCA8K_TRUNK_HASH_EN_CTRL 0x270 +#define QCA8K_TRUNK_HASH_SIP_EN BIT(3) +#define QCA8K_TRUNK_HASH_DIP_EN BIT(2) +#define QCA8K_TRUNK_HASH_SA_EN BIT(1) +#define QCA8K_TRUNK_HASH_DA_EN BIT(0) +#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0) + /* ACL registers */ #define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8)) -#define QCA8K_PORT_VLAN_CVID(x) (x << 16) -#define QCA8K_PORT_VLAN_SVID(x) x +#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16) +#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x) +#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0) +#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x) #define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8)) #define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470 #define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474 /* 
Lookup registers */ #define QCA8K_REG_ATU_DATA0 0x600 -#define QCA8K_ATU_ADDR2_S 24 -#define QCA8K_ATU_ADDR3_S 16 -#define QCA8K_ATU_ADDR4_S 8 +#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24) +#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16) +#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8) +#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0) #define QCA8K_REG_ATU_DATA1 0x604 -#define QCA8K_ATU_PORT_M 0x7f -#define QCA8K_ATU_PORT_S 16 -#define QCA8K_ATU_ADDR0_S 8 +#define QCA8K_ATU_PORT_MASK GENMASK(22, 16) +#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8) +#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0) #define QCA8K_REG_ATU_DATA2 0x608 -#define QCA8K_ATU_VID_M 0xfff -#define QCA8K_ATU_VID_S 8 -#define QCA8K_ATU_STATUS_M 0xf +#define QCA8K_ATU_VID_MASK GENMASK(19, 8) +#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0) #define QCA8K_ATU_STATUS_STATIC 0xf #define QCA8K_REG_ATU_FUNC 0x60c #define QCA8K_ATU_FUNC_BUSY BIT(31) #define QCA8K_ATU_FUNC_PORT_EN BIT(14) #define QCA8K_ATU_FUNC_MULTI_EN BIT(13) #define QCA8K_ATU_FUNC_FULL BIT(12) -#define QCA8K_ATU_FUNC_PORT_M 0xf -#define QCA8K_ATU_FUNC_PORT_S 8 +#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8) #define QCA8K_REG_VTU_FUNC0 0x610 #define QCA8K_VTU_FUNC0_VALID BIT(20) #define QCA8K_VTU_FUNC0_IVL_EN BIT(19) -#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) -#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3 -#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0 -#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1 -#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2 -#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3 +/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4) + * It contains the egress VLAN_MODE for each port: bits [5:4] for port0, + * [7:6] for port1 ... [17:16] for port6. Use the per-port + * defines below to handle this. + */ +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2) +#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0) +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) +#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0) +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) +#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1) +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) +#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2) +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) +#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3) +#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i)) #define QCA8K_REG_VTU_FUNC1 0x614 #define QCA8K_VTU_FUNC1_BUSY BIT(31) -#define QCA8K_VTU_FUNC1_VID_S 16 +#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16) #define QCA8K_VTU_FUNC1_FULL BIT(4) +#define QCA8K_REG_ATU_CTRL 0x618 +#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0) +#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x)) #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) +#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4) #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 -#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24 -#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16 -#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8 -#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0 +#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24) +#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16) +#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8) +#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
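The QCA8K_VTU_FUNC0_EG_MODE_PORT_* macros above pack one 2-bit egress mode per port, at bit position 4 + 2 * port. A self-contained sketch of that per-port packing (the macro and enum names here are illustrative; the shift and the 2-bit field width come from the defines above):

#include <stdio.h>

/* 2-bit egress modes, mirroring the UNMOD/UNTAG/TAG/NOT defines above */
enum eg_mode { EG_UNMOD = 0, EG_UNTAG = 1, EG_TAG = 2, EG_NOT = 3 };

#define EG_MODE_PORT_SHIFT(p) (4 + (p) * 2)
#define EG_MODE_PORT_MASK(p)  (0x3u << EG_MODE_PORT_SHIFT(p))
#define EG_MODE_PORT(p, m)    ((unsigned int)(m) << EG_MODE_PORT_SHIFT(p))

int main(void)
{
	unsigned int func0 = 0;

	/* Tag port 1 (bits [7:6]) and untag port 6 (bits [17:16]) */
	func0 = (func0 & ~EG_MODE_PORT_MASK(1)) | EG_MODE_PORT(1, EG_TAG);
	func0 = (func0 & ~EG_MODE_PORT_MASK(6)) | EG_MODE_PORT(6, EG_UNTAG);

	printf("VTU_FUNC0 = 0x%05x\n", func0); /* prints 0x10080 */
	return 0;
}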
#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) -#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8) -#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8) -#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8) -#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8) -#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3) #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) -#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16) -#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16) -#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16) -#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16) -#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16) -#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16) +#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x) +#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0) +#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1) +#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2) +#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3) +#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4) #define QCA8K_PORT_LOOKUP_LEARN BIT(20) +#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25) + +#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700 +/* Up to 4 trunks are supported. + * Bits 6:0 of each per-trunk byte hold the member bitmap; + * bit 7 enables the trunk. + */ +#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8) +#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7) +#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) +#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0) +#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i)) /* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */ +#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4)) +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0) +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3) +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0) +#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16) +#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4) +/* Complex shift: FIRST shift for the port, THEN shift for the trunk */ +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i)) +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) +#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j)) #define QCA8K_REG_GLOBAL_FC_THRESH 0x800 -#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16) -#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16) -#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0) -#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0) +#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16) +#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x)
FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x) +#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0) +#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x) #define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20) -#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20) -#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24) -#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20) +#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x) +#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24) +#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x) #define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) -#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0) -#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0) +#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0) +#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x) #define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6) #define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7) #define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8) #define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) /* Pkt edit registers */ +#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2)) +#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) +#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i)) #define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2))) /* L3 registers */ @@ -244,6 +299,7 @@ enum qca8k_fdb_cmd { QCA8K_FDB_FLUSH = 1, QCA8K_FDB_LOAD = 2, QCA8K_FDB_PURGE = 3, + QCA8K_FDB_FLUSH_PORT = 5, QCA8K_FDB_NEXT = 6, QCA8K_FDB_SEARCH = 7, }; @@ -264,6 +320,7 @@ struct ar8xxx_port_status { struct qca8k_match_data { u8 id; bool reduced_package; + u8 mib_count; }; enum { @@ -282,6 +339,9 @@ struct qca8k_ports_config { struct qca8k_priv { u8 switch_id; u8 switch_revision; + u8 mirror_rx; + u8 mirror_tx; + u8 lag_hash_mode; bool legacy_phy_port_mapping; struct qca8k_ports_config ports_config; struct regmap *regmap; diff --git a/drivers/net/dsa/realtek-smi-core.c 
b/drivers/net/dsa/realtek-smi-core.c index c66ebd0ee217..aae46ada8d83 100644 --- a/drivers/net/dsa/realtek-smi-core.c +++ b/drivers/net/dsa/realtek-smi-core.c @@ -456,7 +456,7 @@ static int realtek_smi_probe(struct platform_device *pdev) smi->ds->ops = var->ds_ops; ret = dsa_register_switch(smi->ds); if (ret) { - dev_err(dev, "unable to register switch ret = %d\n", ret); + dev_err_probe(dev, ret, "unable to register switch\n"); return ret; } return 0; diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c index baaae97283c5..3b729544798b 100644 --- a/drivers/net/dsa/rtl8365mb.c +++ b/drivers/net/dsa/rtl8365mb.c @@ -107,6 +107,7 @@ #define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112 /* Family-specific data and limits */ +#define RTL8365MB_PHYADDRMAX 7 #define RTL8365MB_NUM_PHYREGS 32 #define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1) #define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1) @@ -176,7 +177,7 @@ #define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0) -#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5) +#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5) #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8) #define RTL8365MB_PHY_BASE 0x2000 #define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03 @@ -276,7 +277,7 @@ (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport)) #define RTL8365MB_PORT_ISOLATION_MASK 0x07FF -/* MSTP port state registers - indexed by tree instancrSTI (tree ine */ +/* MSTP port state registers - indexed by tree instance */ #define RTL8365MB_MSTI_CTRL_BASE 0x0A00 #define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \ (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3)) @@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum) u16 val; int ret; + if (phy > RTL8365MB_PHYADDRMAX) + return -EINVAL; + if (regnum > RTL8365MB_PHYREGMAX) return -EINVAL; @@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum, u32 ocp_addr; int ret; + if (phy > RTL8365MB_PHYADDRMAX) + return -EINVAL; + if (regnum > RTL8365MB_PHYREGMAX) return -EINVAL; @@ -760,7 +767,8 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port, * 0 = no delay, 1 = 2 ns delay * RX delay: * 0 = no delay, 7 = maximum delay - * No units are specified, but there are a total of 8 steps. + * Each step is approximately 0.3 ns, so the maximum delay is about + * 2.1 ns. * * The vendor driver also states that this must be configured *before* * forcing the external interface into a particular mode, which is done @@ -771,10 +779,6 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port, * specified. We ignore the detail of the RGMII interface mode * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only * property. - * - * For the RX delay, we assume that a register value of 4 corresponds to - * 2 ns. But this is just an educated guess, so ignore all other values - * to avoid too much confusion. 
*/ if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) { val = val / 1000; /* convert to ns */ @@ -787,13 +791,13 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port, } if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) { - val = val / 1000; /* convert to ns */ + val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */ - if (val == 0 || val == 2) - rx_delay = val * 2; + if (val <= 7) + rx_delay = val; else dev_warn(smi->dev, - "EXT port RX delay must be 0 to 2 ns\n"); + "EXT port RX delay must be 0 to 2.1 ns\n"); } ret = regmap_update_bits( @@ -896,7 +900,8 @@ static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port, { if (dsa_is_user_port(ds, port) && (interface == PHY_INTERFACE_MODE_NA || - interface == PHY_INTERFACE_MODE_INTERNAL)) + interface == PHY_INTERFACE_MODE_INTERNAL || + interface == PHY_INTERFACE_MODE_GMII)) /* Internal PHY */ return true; else if (dsa_is_cpu_port(ds, port) && diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index 03deacd83e61..ecc19bd5115f 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -1186,7 +1186,8 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port) static int rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { struct realtek_smi *smi = ds->priv; unsigned int port_bitmap = 0; @@ -1198,7 +1199,7 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, if (i == port) continue; /* Not on this bridge */ - if (dsa_to_port(ds, i)->bridge_dev != bridge) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Join this port to each other port on the bridge */ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), @@ -1218,7 +1219,7 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, static void rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { struct realtek_smi *smi = ds->priv; unsigned int port_bitmap = 0; @@ -1230,7 +1231,7 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, if (i == port) continue; /* Not on this bridge */ - if (dsa_to_port(ds, i)->bridge_dev != bridge) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* Remove this port from any other port on the bridge */ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 21dba16af097..9ba2ec2b966d 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -249,6 +249,7 @@ struct sja1105_private { bool fixed_link[SJA1105_MAX_NUM_PORTS]; unsigned long ucast_egress_floods; unsigned long bcast_egress_floods; + unsigned long hwts_tx_en; const struct sja1105_info *info; size_t max_xfer_len; struct spi_device *spidev; @@ -256,11 +257,13 @@ struct sja1105_private { u16 bridge_pvid[SJA1105_MAX_NUM_PORTS]; u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS]; struct sja1105_flow_block flow_block; - struct sja1105_port ports[SJA1105_MAX_NUM_PORTS]; /* Serializes transmission of management frames so that * the switch doesn't confuse them with one another. 
*/ struct mutex mgmt_lock; + /* PTP two-step TX timestamp ID, and its serialization lock */ + spinlock_t ts_id_lock; + u8 ts_id; /* Serializes access to the dynamic config interface */ struct mutex dynamic_config_lock; struct devlink_region **regions; @@ -269,7 +272,6 @@ struct sja1105_private { struct mii_bus *mdio_base_tx; struct mii_bus *mdio_pcs; struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS]; - struct sja1105_tagger_data tagger_data; struct sja1105_ptp_data ptp_data; struct sja1105_tas_data tas_data; }; diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 72b9b39b0989..7dcdd784aea4 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -379,7 +379,7 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port, vl_rule = true; rc = sja1105_vl_gate(priv, port, extack, cookie, - &key, act->gate.index, + &key, act->hw_index, act->gate.prio, act->gate.basetime, act->gate.cycletime, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index c343effe2e96..b513713be610 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -118,13 +118,14 @@ static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) static int sja1105_commit_pvid(struct dsa_switch *ds, int port) { struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *br = dsa_port_bridge_dev_get(dp); struct sja1105_private *priv = ds->priv; struct sja1105_vlan_lookup_entry *vlan; bool drop_untagged = false; int match, rc; u16 pvid; - if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev)) + if (br && br_vlan_enabled(br)) pvid = priv->bridge_pvid[port]; else pvid = priv->tag_8021q_pvid[port]; @@ -1979,7 +1980,7 @@ static int sja1105_manage_flood_domains(struct sja1105_private *priv) } static int sja1105_bridge_member(struct dsa_switch *ds, int port, - struct net_device *br, bool member) + struct dsa_bridge bridge, bool member) { struct sja1105_l2_forwarding_entry *l2_fwd; struct sja1105_private *priv = ds->priv; @@ -2004,7 +2005,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port, */ if (i == port) continue; - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; sja1105_port_allow_traffic(l2_fwd, i, port, member); sja1105_port_allow_traffic(l2_fwd, port, i, member); @@ -2073,15 +2074,31 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, } static int sja1105_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload) { - return sja1105_bridge_member(ds, port, br, true); + int rc; + + rc = sja1105_bridge_member(ds, port, bridge, true); + if (rc) + return rc; + + rc = dsa_tag_8021q_bridge_tx_fwd_offload(ds, port, bridge); + if (rc) { + sja1105_bridge_member(ds, port, bridge, false); + return rc; + } + + *tx_fwd_offload = true; + + return 0; } static void sja1105_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { - sja1105_bridge_member(ds, port, br, false); + dsa_tag_8021q_bridge_tx_fwd_unoffload(ds, port, bridge); + sja1105_bridge_member(ds, port, bridge, false); } #define BYTES_PER_KBIT (1000LL / 8) @@ -2587,8 +2604,9 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port, if (netif_is_bridge_master(upper)) { list_for_each_entry(dp, &dst->ports, list) { - if (dp->bridge_dev && dp->bridge_dev != upper && - 
br_vlan_enabled(dp->bridge_dev)) { + struct net_device *br = dsa_port_bridge_dev_get(dp); + + if (br && br != upper && br_vlan_enabled(br)) { NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported"); return -EBUSY; @@ -2599,18 +2617,6 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port, return 0; } -static void sja1105_port_disable(struct dsa_switch *ds, int port) -{ - struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = &priv->ports[port]; - - if (!dsa_is_user_port(ds, port)) - return; - - kthread_cancel_work_sync(&sp->xmit_work); - skb_queue_purge(&sp->xmit_queue); -} - static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, struct sk_buff *skb, bool takets) { @@ -2669,10 +2675,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, return NETDEV_TX_OK; } -#define work_to_port(work) \ - container_of((work), struct sja1105_port, xmit_work) -#define tagger_to_sja1105(t) \ - container_of((t), struct sja1105_private, tagger_data) +#define work_to_xmit_work(w) \ + container_of((w), struct sja1105_deferred_xmit_work, work) /* Deferred work is unfortunately necessary because setting up the management * route cannot be done from atomic context (SPI transfer takes a sleepable @@ -2680,25 +2684,41 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, */ static void sja1105_port_deferred_xmit(struct kthread_work *work) { - struct sja1105_port *sp = work_to_port(work); - struct sja1105_tagger_data *tagger_data = sp->data; - struct sja1105_private *priv = tagger_to_sja1105(tagger_data); - int port = sp - priv->ports; - struct sk_buff *skb; + struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work); + struct sk_buff *clone, *skb = xmit_work->skb; + struct dsa_switch *ds = xmit_work->dp->ds; + struct sja1105_private *priv = ds->priv; + int port = xmit_work->dp->index; - while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) { - struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; + clone = SJA1105_SKB_CB(skb)->clone; - mutex_lock(&priv->mgmt_lock); + mutex_lock(&priv->mgmt_lock); - sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone); + sja1105_mgmt_xmit(ds, port, 0, skb, !!clone); - /* The clone, if there, was made by dsa_skb_tx_timestamp */ - if (clone) - sja1105_ptp_txtstamp_skb(priv->ds, port, clone); + /* The clone, if there, was made by dsa_skb_tx_timestamp */ + if (clone) + sja1105_ptp_txtstamp_skb(ds, port, clone); - mutex_unlock(&priv->mgmt_lock); - } + mutex_unlock(&priv->mgmt_lock); + + kfree(xmit_work); +} + +static int sja1105_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_tagger_data *tagger_data; + + if (proto != priv->info->tag_proto) + return -EPROTONOSUPPORT; + + tagger_data = sja1105_tagger_data(ds); + tagger_data->xmit_work_fn = sja1105_port_deferred_xmit; + tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp; + + return 0; } /* The MAXAGE setting belongs to the L2 Forwarding Parameters table, @@ -3001,58 +3021,6 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port, return 0; } -static void sja1105_teardown_ports(struct sja1105_private *priv) -{ - struct dsa_switch *ds = priv->ds; - int port; - - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - - if (sp->xmit_worker) - kthread_destroy_worker(sp->xmit_worker); - } -} - -static int sja1105_setup_ports(struct sja1105_private *priv) -{ - struct
sja1105_tagger_data *tagger_data = &priv->tagger_data; - struct dsa_switch *ds = priv->ds; - int port, rc; - - /* Connections between dsa_port and sja1105_port */ - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - struct dsa_port *dp = dsa_to_port(ds, port); - struct kthread_worker *worker; - struct net_device *slave; - - if (!dsa_port_is_user(dp)) - continue; - - dp->priv = sp; - sp->data = tagger_data; - slave = dp->slave; - kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); - worker = kthread_create_worker(0, "%s_xmit", slave->name); - if (IS_ERR(worker)) { - rc = PTR_ERR(worker); - dev_err(ds->dev, - "failed to create deferred xmit thread: %d\n", - rc); - goto out_destroy_workers; - } - sp->xmit_worker = worker; - skb_queue_head_init(&sp->xmit_queue); - } - - return 0; - -out_destroy_workers: - sja1105_teardown_ports(priv); - return rc; -} - /* The programming model for the SJA1105 switch is "all-at-once" via static * configuration tables. Some of these can be dynamically modified at runtime, * but not the xMII mode parameters table. @@ -3098,10 +3066,6 @@ static int sja1105_setup(struct dsa_switch *ds) } } - rc = sja1105_setup_ports(priv); - if (rc) - goto out_static_config_free; - sja1105_tas_setup(ds); sja1105_flower_setup(ds); @@ -3139,7 +3103,7 @@ static int sja1105_setup(struct dsa_switch *ds) ds->vlan_filtering_is_global = true; ds->untag_bridge_pvid = true; /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */ - ds->num_fwd_offloading_bridges = 7; + ds->max_num_bridges = 7; /* Advertise the 8 egress queues */ ds->num_tx_queues = SJA1105_NUM_TC; @@ -3158,7 +3122,6 @@ out_ptp_clock_unregister: out_flower_teardown: sja1105_flower_teardown(ds); sja1105_tas_teardown(ds); - sja1105_teardown_ports(priv); out_static_config_free: sja1105_static_config_free(&priv->static_config); @@ -3178,12 +3141,12 @@ static void sja1105_teardown(struct dsa_switch *ds) sja1105_ptp_clock_unregister(ds); sja1105_flower_teardown(ds); sja1105_tas_teardown(ds); - sja1105_teardown_ports(priv); sja1105_static_config_free(&priv->static_config); } static const struct dsa_switch_ops sja1105_switch_ops = { .get_tag_protocol = sja1105_get_tag_protocol, + .connect_tag_protocol = sja1105_connect_tag_protocol, .setup = sja1105_setup, .teardown = sja1105_teardown, .set_ageing_time = sja1105_set_ageing_time, @@ -3197,7 +3160,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .get_ethtool_stats = sja1105_get_ethtool_stats, .get_sset_count = sja1105_get_sset_count, .get_ts_info = sja1105_get_ts_info, - .port_disable = sja1105_port_disable, .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, .port_fdb_del = sja1105_fdb_del, @@ -3228,8 +3190,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add, .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del, .port_prechangeupper = sja1105_prechangeupper, - .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload, - .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload, }; static const struct of_device_id sja1105_dt_ids[]; @@ -3367,6 +3327,7 @@ static int sja1105_probe(struct spi_device *spi) mutex_init(&priv->ptp_data.lock); mutex_init(&priv->dynamic_config_lock); mutex_init(&priv->mgmt_lock); + spin_lock_init(&priv->ts_id_lock); rc = sja1105_parse_dt(priv); if (rc < 0) { diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index 54396992a919..be3068a935af 100644 --- 
a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -58,13 +58,12 @@ enum sja1105_ptp_clk_mode { #define ptp_data_to_sja1105(d) \ container_of((d), struct sja1105_private, ptp_data) -/* Must be called only with priv->tagger_data.state bit - * SJA1105_HWTS_RX_EN cleared +/* Must be called only while the RX timestamping state of the tagger + * is turned off */ static int sja1105_change_rxtstamping(struct sja1105_private *priv, bool on) { - struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; struct sja1105_general_params_entry *general_params; struct sja1105_table *table; @@ -74,13 +73,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv, general_params->send_meta1 = on; general_params->send_meta0 = on; - /* Initialize the meta state machine to a known state */ - if (priv->tagger_data.stampable_skb) { - kfree_skb(priv->tagger_data.stampable_skb); - priv->tagger_data.stampable_skb = NULL; - } ptp_cancel_worker_sync(ptp_data->clock); - skb_queue_purge(&tagger_data->skb_txtstamp_queue); + skb_queue_purge(&ptp_data->skb_txtstamp_queue); skb_queue_purge(&ptp_data->skb_rxtstamp_queue); return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING); @@ -88,6 +82,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv, int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) { + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds); struct sja1105_private *priv = ds->priv; struct hwtstamp_config config; bool rx_on; @@ -98,10 +93,10 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) switch (config.tx_type) { case HWTSTAMP_TX_OFF: - priv->ports[port].hwts_tx_en = false; + priv->hwts_tx_en &= ~BIT(port); break; case HWTSTAMP_TX_ON: - priv->ports[port].hwts_tx_en = true; + priv->hwts_tx_en |= BIT(port); break; default: return -ERANGE; @@ -116,8 +111,8 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) break; } - if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) { - clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); + if (rx_on != tagger_data->rxtstamp_get_state(ds)) { + tagger_data->rxtstamp_set_state(ds, false); rc = sja1105_change_rxtstamping(priv, rx_on); if (rc < 0) { @@ -126,7 +121,7 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) return rc; } if (rx_on) - set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); + tagger_data->rxtstamp_set_state(ds, true); } if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) @@ -136,15 +131,16 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr) { + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds); struct sja1105_private *priv = ds->priv; struct hwtstamp_config config; config.flags = 0; - if (priv->ports[port].hwts_tx_en) + if (priv->hwts_tx_en & BIT(port)) config.tx_type = HWTSTAMP_TX_ON; else config.tx_type = HWTSTAMP_TX_OFF; - if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) + if (tagger_data->rxtstamp_get_state(ds)) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; else config.rx_filter = HWTSTAMP_FILTER_NONE; @@ -417,10 +413,11 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp) bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { + struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds); struct 
sja1105_private *priv = ds->priv; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; - if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) + if (!tagger_data->rxtstamp_get_state(ds)) return false; /* We need to read the full PTP clock to reconstruct the Rx @@ -453,6 +450,39 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, return priv->info->rxtstamp(ds, port, skb); } +void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, + enum sja1110_meta_tstamp dir, u64 tstamp) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct skb_shared_hwtstamps shwt = {0}; + + /* We don't care about RX timestamps on the CPU port */ + if (dir == SJA1110_META_TSTAMP_RX) + return; + + spin_lock(&ptp_data->skb_txtstamp_queue.lock); + + skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) { + if (SJA1105_SKB_CB(skb)->ts_id != ts_id) + continue; + + __skb_unlink(skb, &ptp_data->skb_txtstamp_queue); + skb_match = skb; + + break; + } + + spin_unlock(&ptp_data->skb_txtstamp_queue.lock); + + if (WARN_ON(!skb_match)) + return; + + shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); + skb_complete_tx_timestamp(skb_match, &shwt); +} + /* In addition to cloning the skb which is done by the common * sja1105_port_txtstamp, we need to generate a timestamp ID and save the * packet to the TX timestamping queue. @@ -461,22 +491,22 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = &priv->ports[port]; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; u8 ts_id; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - spin_lock(&sp->data->meta_lock); + spin_lock(&priv->ts_id_lock); - ts_id = sp->data->ts_id; + ts_id = priv->ts_id; /* Deal automatically with 8-bit wraparound */ - sp->data->ts_id++; + priv->ts_id++; SJA1105_SKB_CB(clone)->ts_id = ts_id; - spin_unlock(&sp->data->meta_lock); + spin_unlock(&priv->ts_id_lock); - skb_queue_tail(&sp->data->skb_txtstamp_queue, clone); + skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone); } /* Called from dsa_skb_tx_timestamp. 
This callback is just to clone @@ -486,10 +516,9 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = &priv->ports[port]; struct sk_buff *clone; - if (!sp->hwts_tx_en) + if (!(priv->hwts_tx_en & BIT(port))) return; clone = skb_clone_sk(skb); @@ -896,7 +925,6 @@ static struct ptp_pin_desc sja1105_ptp_pin = { int sja1105_ptp_clock_register(struct dsa_switch *ds) { struct sja1105_private *priv = ds->priv; - struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; ptp_data->caps = (struct ptp_clock_info) { @@ -919,8 +947,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds) /* Only used on SJA1105 */ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue); /* Only used on SJA1110 */ - skb_queue_head_init(&tagger_data->skb_txtstamp_queue); - spin_lock_init(&tagger_data->meta_lock); + skb_queue_head_init(&ptp_data->skb_txtstamp_queue); ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev); if (IS_ERR_OR_NULL(ptp_data->clock)) @@ -937,7 +964,6 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds) void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { struct sja1105_private *priv = ds->priv; - struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; if (IS_ERR_OR_NULL(ptp_data->clock)) @@ -945,7 +971,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds) del_timer_sync(&ptp_data->extts_timer); ptp_cancel_worker_sync(ptp_data->clock); - skb_queue_purge(&tagger_data->skb_txtstamp_queue); + skb_queue_purge(&ptp_data->skb_txtstamp_queue); skb_queue_purge(&ptp_data->skb_rxtstamp_queue); ptp_clock_unregister(ptp_data->clock); ptp_data->clock = NULL; diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index 3ae6b9fdd492..416461ee95d2 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -8,6 +8,21 @@ #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) +/* Timestamps are in units of 8 ns clock ticks (equivalent to + * a fixed 125 MHz clock). 
+ */ +#define SJA1105_TICK_NS 8 + +static inline s64 ns_to_sja1105_ticks(s64 ns) +{ + return ns / SJA1105_TICK_NS; +} + +static inline s64 sja1105_ticks_to_ns(s64 ticks) +{ + return ticks * SJA1105_TICK_NS; +} + /* Calculate the first base_time in the future that satisfies this * relationship: * @@ -62,6 +77,10 @@ struct sja1105_ptp_data { struct timer_list extts_timer; /* Used only on SJA1105 to reconstruct partial timestamps */ struct sk_buff_head skb_rxtstamp_queue; + /* Used on SJA1110 where meta frames are generated only for + * 2-step TX timestamps + */ + struct sk_buff_head skb_txtstamp_queue; struct ptp_clock_info caps; struct ptp_clock *clock; struct sja1105_ptp_cmd cmd; @@ -112,6 +131,9 @@ bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); +void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, + enum sja1110_meta_tstamp dir, u64 tstamp); + #else struct sja1105_ptp_cmd; @@ -178,6 +200,8 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds, #define sja1110_rxtstamp NULL #define sja1110_txtstamp NULL +#define sja1110_process_meta_tstamp NULL + #endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ #endif /* _SJA1105_PTP_H */ diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index a4b1447ff055..ae55167ce0a6 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -1122,9 +1122,6 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc) vsc->gc.ngpio = 4; vsc->gc.owner = THIS_MODULE; vsc->gc.parent = vsc->dev; -#if IS_ENABLED(CONFIG_OF_GPIO) - vsc->gc.of_node = vsc->dev->of_node; -#endif vsc->gc.base = -1; vsc->gc.get = vsc73xx_gpio_get; vsc->gc.set = vsc73xx_gpio_set; @@ -1216,12 +1213,10 @@ int vsc73xx_probe(struct vsc73xx *vsc) } EXPORT_SYMBOL(vsc73xx_probe); -int vsc73xx_remove(struct vsc73xx *vsc) +void vsc73xx_remove(struct vsc73xx *vsc) { dsa_unregister_switch(vsc->ds); gpiod_set_value(vsc->reset, 1); - - return 0; } EXPORT_SYMBOL(vsc73xx_remove); diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h index 30b951504e65..30b1f0a36566 100644 --- a/drivers/net/dsa/vitesse-vsc73xx.h +++ b/drivers/net/dsa/vitesse-vsc73xx.h @@ -26,5 +26,5 @@ struct vsc73xx_ops { int vsc73xx_is_addr_valid(u8 block, u8 subblock); int vsc73xx_probe(struct vsc73xx *vsc); -int vsc73xx_remove(struct vsc73xx *vsc); +void vsc73xx_remove(struct vsc73xx *vsc); void vsc73xx_shutdown(struct vsc73xx *vsc); diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 910fcb3b252b..0730352cdd57 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -5,6 +5,7 @@ */ #include <net/dsa.h> +#include <linux/etherdevice.h> #include <linux/if_bridge.h> #include <linux/of_device.h> #include <linux/netdev_features.h> @@ -501,7 +502,7 @@ static void xrs700x_mac_link_up(struct dsa_switch *ds, int port, } static int xrs700x_bridge_common(struct dsa_switch *ds, int port, - struct net_device *bridge, bool join) + struct dsa_bridge bridge, bool join) { unsigned int i, cpu_mask = 0, mask = 0; struct xrs700x *priv = ds->priv; @@ -513,14 +514,14 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port, cpu_mask |= BIT(i); - if (dsa_to_port(ds, i)->bridge_dev == bridge) + if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; mask |= 
BIT(i); } for (i = 0; i < ds->num_ports; i++) { - if (dsa_to_port(ds, i)->bridge_dev != bridge) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; /* 1 = Disable forwarding to the port */ @@ -540,13 +541,13 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port, } static int xrs700x_bridge_join(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge, bool *tx_fwd_offload) { return xrs700x_bridge_common(ds, port, bridge, true); } static void xrs700x_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *bridge) + struct dsa_bridge bridge) { xrs700x_bridge_common(ds, port, bridge, false); } diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 8ef34901c2d8..1111d1f33865 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -225,7 +225,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) list_del(&slave->list); queue->num_slaves--; slave->dev->flags &= ~IFF_SLAVE; - dev_put(slave->dev); + dev_put_track(slave->dev, &slave->dev_tracker); kfree(slave); } @@ -399,7 +399,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) if (duplicate_slave) eql_kill_one_slave(queue, duplicate_slave); - dev_hold(slave->dev); + dev_hold_track(slave->dev, &slave->dev_tracker, GFP_ATOMIC); list_add(&slave->list, &queue->all_slaves); queue->num_slaves++; slave->dev->flags |= IFF_SLAVE; diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 05e15b6e5e2c..481f1df3106c 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1138,7 +1138,9 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) } static void -typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { ering->rx_max_pending = RXENT_ENTRIES; ering->tx_max_pending = TXLO_ENTRIES - 1; diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c index 941754ea78ec..1df7601af86a 100644 --- a/drivers/net/ethernet/8390/hydra.c +++ b/drivers/net/ethernet/8390/hydra.c @@ -116,6 +116,7 @@ static int hydra_init(struct zorro_dev *z) unsigned long ioaddr = board+HYDRA_NIC_BASE; const char name[] = "NE2000"; int start_page, stop_page; + u8 macaddr[ETH_ALEN]; int j; int err; @@ -129,7 +130,8 @@ static int hydra_init(struct zorro_dev *z) return -ENOMEM; for (j = 0; j < ETH_ALEN; j++) - dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); + macaddr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); + eth_hw_addr_set(dev, macaddr); /* We must set the 8390 for word mode. 
*/ z_writeb(0x4b, ioaddr + NE_EN0_DCFG); diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c index 91b04abfd687..7fb819b9b89a 100644 --- a/drivers/net/ethernet/8390/mac8390.c +++ b/drivers/net/ethernet/8390/mac8390.c @@ -292,6 +292,7 @@ static bool mac8390_rsrc_init(struct net_device *dev, struct nubus_dirent ent; int offset; volatile unsigned short *i; + u8 addr[ETH_ALEN]; dev->irq = SLOT2IRQ(board->slot); /* This is getting to be a habit */ @@ -314,7 +315,8 @@ static bool mac8390_rsrc_init(struct net_device *dev, return false; } - nubus_get_rsrc_mem(dev->dev_addr, &ent, 6); + nubus_get_rsrc_mem(addr, &ent, 6); + eth_hw_addr_set(dev, addr); if (useresources[cardtype] == 1) { nubus_rewinddir(&dir); diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c index 0890fa493f70..6e62c37c9400 100644 --- a/drivers/net/ethernet/8390/smc-ultra.c +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -204,6 +204,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr) { int i, retval; int checksum = 0; + u8 macaddr[ETH_ALEN]; const char *model_name; unsigned char eeprom_irq = 0; static unsigned version_printed; @@ -239,7 +240,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr) model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ"; for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); + macaddr[i] = inb(ioaddr + 8 + i); + eth_hw_addr_set(dev, macaddr); netdev_info(dev, "%s at %#3x, %pM", model_name, ioaddr, dev->dev_addr); diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c index 263a942d81fa..5b00c452bede 100644 --- a/drivers/net/ethernet/8390/wd.c +++ b/drivers/net/ethernet/8390/wd.c @@ -168,6 +168,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) int checksum = 0; int ancient = 0; /* An old card without config registers. 
*/ int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */ + u8 addr[ETH_ALEN]; const char *model_name; static unsigned version_printed; struct ei_device *ei_local = netdev_priv(dev); @@ -191,7 +192,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) netdev_info(dev, version); for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + 8 + i); + addr[i] = inb(ioaddr + 8 + i); + eth_hw_addr_set(dev, addr); netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 4601b38f532a..db3ec4768159 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -73,6 +73,7 @@ config DNET source "drivers/net/ethernet/dec/Kconfig" source "drivers/net/ethernet/dlink/Kconfig" source "drivers/net/ethernet/emulex/Kconfig" +source "drivers/net/ethernet/engleder/Kconfig" source "drivers/net/ethernet/ezchip/Kconfig" source "drivers/net/ethernet/faraday/Kconfig" source "drivers/net/ethernet/freescale/Kconfig" @@ -182,6 +183,7 @@ source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/toshiba/Kconfig" source "drivers/net/ethernet/tundra/Kconfig" +source "drivers/net/ethernet/vertexcom/Kconfig" source "drivers/net/ethernet/via/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index fdd8c6c17451..8a87c1083d1d 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_DNET) += dnet.o obj-$(CONFIG_NET_VENDOR_DEC) += dec/ obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ +obj-$(CONFIG_NET_VENDOR_ENGLEDER) += engleder/ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/ obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ @@ -92,6 +93,7 @@ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ +obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/ obj-$(CONFIG_NET_VENDOR_VIA) += via/ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index f4edc616388c..537e6a85e18d 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3914,10 +3914,9 @@ static int et131x_pci_setup(struct pci_dev *pdev, pci_set_master(pdev); /* Check the DMA addressing support of this device */ - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { dev_err(&pdev->dev, "No usable DMA addressing method\n"); - rc = -EIO; goto err_release_res; } diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 800ee022388f..849de4564709 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -29,6 +29,7 @@ #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/soc/sunxi/sunxi_sram.h> +#include <linux/dmaengine.h> #include "sun4i-emac.h" @@ -76,7 +77,6 @@ struct emac_board_info { void __iomem *membase; u32 msg_enable; struct net_device *ndev; - struct sk_buff *skb_last; u16 tx_fifo_stat; int emacrx_completed_flag; @@ -87,6 +87,16 @@ 
struct emac_board_info { unsigned int duplex; phy_interface_t phy_interface; + struct dma_chan *rx_chan; + phys_addr_t emac_rx_fifo; +}; + +struct emac_dma_req { + struct emac_board_info *db; + struct dma_async_tx_descriptor *desc; + struct sk_buff *skb; + dma_addr_t rxbuf; + int count; }; static void emac_update_speed(struct net_device *dev) @@ -206,6 +216,117 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count) readsl(reg, data, round_up(count, 4) / 4); } +static struct emac_dma_req * +emac_alloc_dma_req(struct emac_board_info *db, + struct dma_async_tx_descriptor *desc, struct sk_buff *skb, + dma_addr_t rxbuf, int count) +{ + struct emac_dma_req *req; + + req = kzalloc(sizeof(struct emac_dma_req), GFP_ATOMIC); + if (!req) + return NULL; + + req->db = db; + req->desc = desc; + req->skb = skb; + req->rxbuf = rxbuf; + req->count = count; + return req; +} + +static void emac_free_dma_req(struct emac_dma_req *req) +{ + kfree(req); +} + +static void emac_dma_done_callback(void *arg) +{ + struct emac_dma_req *req = arg; + struct emac_board_info *db = req->db; + struct sk_buff *skb = req->skb; + struct net_device *dev = db->ndev; + int rxlen = req->count; + u32 reg_val; + + dma_unmap_single(db->dev, req->rxbuf, rxlen, DMA_FROM_DEVICE); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_bytes += rxlen; + /* Pass to upper layer */ + dev->stats.rx_packets++; + + /* re enable cpu receive */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val &= ~EMAC_RX_CTL_DMA_EN; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + + /* re enable interrupt */ + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + db->emacrx_completed_flag = 1; + emac_free_dma_req(req); +} + +static int emac_dma_inblk_32bit(struct emac_board_info *db, + struct sk_buff *skb, void *rdptr, int count) +{ + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + dma_addr_t rxbuf; + struct emac_dma_req *req; + int ret = 0; + + rxbuf = dma_map_single(db->dev, rdptr, count, DMA_FROM_DEVICE); + ret = dma_mapping_error(db->dev, rxbuf); + if (ret) { + dev_err(db->dev, "dma mapping error.\n"); + return ret; + } + + desc = dmaengine_prep_slave_single(db->rx_chan, rxbuf, count, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_err(db->dev, "prepare slave single failed\n"); + ret = -ENOMEM; + goto prepare_err; + } + + req = emac_alloc_dma_req(db, desc, skb, rxbuf, count); + if (!req) { + dev_err(db->dev, "alloc emac dma req error.\n"); + ret = -ENOMEM; + goto alloc_req_err; + } + + desc->callback_param = req; + desc->callback = emac_dma_done_callback; + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(db->dev, "dma submit error.\n"); + goto submit_err; + } + + dma_async_issue_pending(db->rx_chan); + return ret; + +submit_err: + emac_free_dma_req(req); + +alloc_req_err: + dmaengine_desc_free(desc); + +prepare_err: + dma_unmap_single(db->dev, rxbuf, count, DMA_FROM_DEVICE); + return ret; +} + /* ethtool ops */ static void emac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) @@ -499,7 +620,6 @@ static void emac_rx(struct net_device *dev) struct sk_buff *skb; u8 *rdptr; bool good_packet; - static int rxlen_last; unsigned int reg_val; u32 rxhdr, rxstatus, rxcount, rxlen; @@ -514,22 +634,6 @@ static void emac_rx(struct net_device *dev) if (netif_msg_rx_status(db)) dev_dbg(db->dev, "RXCount: %x\n", rxcount); - if ((db->skb_last 
!= NULL) && (rxlen_last > 0)) { - dev->stats.rx_bytes += rxlen_last; - - /* Pass to upper layer */ - db->skb_last->protocol = eth_type_trans(db->skb_last, - dev); - netif_rx(db->skb_last); - dev->stats.rx_packets++; - db->skb_last = NULL; - rxlen_last = 0; - - reg_val = readl(db->membase + EMAC_RX_CTL_REG); - reg_val &= ~EMAC_RX_CTL_DMA_EN; - writel(reg_val, db->membase + EMAC_RX_CTL_REG); - } - if (!rxcount) { db->emacrx_completed_flag = 1; reg_val = readl(db->membase + EMAC_INT_CTL_REG); @@ -623,6 +727,19 @@ static void emac_rx(struct net_device *dev) if (netif_msg_rx_status(db)) dev_dbg(db->dev, "RxLen %x\n", rxlen); + if (rxlen >= dev->mtu && db->rx_chan) { + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val |= EMAC_RX_CTL_DMA_EN; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + if (!emac_dma_inblk_32bit(db, skb, rdptr, rxlen)) + break; + + /* re enable cpu receive. then try to receive by emac_inblk_32bit */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val &= ~EMAC_RX_CTL_DMA_EN; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + } + emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG, rdptr, rxlen); dev->stats.rx_bytes += rxlen; @@ -677,7 +794,12 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id) reg_val = readl(db->membase + EMAC_INT_CTL_REG); reg_val |= (0xf << 0) | (0x01 << 8); writel(reg_val, db->membase + EMAC_INT_CTL_REG); + } else { + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); } + spin_unlock(&db->lock); return IRQ_HANDLED; @@ -782,6 +904,58 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; +static int emac_configure_dma(struct emac_board_info *db) +{ + struct platform_device *pdev = db->pdev; + struct net_device *ndev = db->ndev; + struct dma_slave_config conf = {}; + struct resource *regs; + int err = 0; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + netdev_err(ndev, "get io resource from device failed.\n"); + err = -ENOMEM; + goto out_clear_chan; + } + + netdev_info(ndev, "get io resource from device: %pa, size = %u\n", + &regs->start, (unsigned int)resource_size(regs)); + db->emac_rx_fifo = regs->start + EMAC_RX_IO_DATA_REG; + + db->rx_chan = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(db->rx_chan)) { + netdev_err(ndev, + "failed to request dma channel. dma is disabled\n"); + err = PTR_ERR(db->rx_chan); + goto out_clear_chan; + } + + conf.direction = DMA_DEV_TO_MEM; + conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + conf.src_addr = db->emac_rx_fifo; + conf.dst_maxburst = 4; + conf.src_maxburst = 4; + conf.device_fc = false; + + err = dmaengine_slave_config(db->rx_chan, &conf); + if (err) { + netdev_err(ndev, "config dma slave failed\n"); + err = -EINVAL; + goto out_slave_configure_err; + } + + return err; + +out_slave_configure_err: + dma_release_channel(db->rx_chan); + +out_clear_chan: + db->rx_chan = NULL; + return err; +} + /* Search EMAC board, allocate space and register it */ static int emac_probe(struct platform_device *pdev) @@ -824,6 +998,9 @@ static int emac_probe(struct platform_device *pdev) goto out_iounmap; } + if (emac_configure_dma(db)) + netdev_info(ndev, "configure dma failed. 
disable dma.\n"); + db->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(db->clk)) { ret = PTR_ERR(db->clk); @@ -906,6 +1083,11 @@ static int emac_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct emac_board_info *db = netdev_priv(ndev); + if (db->rx_chan) { + dmaengine_terminate_all(db->rx_chan); + dma_release_channel(db->rx_chan); + } + unregister_netdev(ndev); sunxi_sram_release(&pdev->dev); clk_disable_unprepare(db->clk); diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 732da15a3827..22fe98555b24 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -589,8 +589,7 @@ static int acenic_probe_one(struct pci_dev *pdev, } ap->name = dev->name; - if (ap->pci_using_dac) - dev->features |= NETIF_F_HIGHDMA; + dev->features |= NETIF_F_HIGHDMA; pci_set_drvdata(pdev, dev); @@ -1130,11 +1129,7 @@ static int ace_init(struct net_device *dev) /* * Configure DMA attributes. */ - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - ap->pci_using_dac = 1; - } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { - ap->pci_using_dac = 0; - } else { + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { ecode = -ENODEV; goto init_error; } diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h index 265fa601a258..ca5ce0cbbad1 100644 --- a/drivers/net/ethernet/alteon/acenic.h +++ b/drivers/net/ethernet/alteon/acenic.h @@ -692,7 +692,6 @@ struct ace_private __attribute__ ((aligned (SMP_CACHE_BYTES))); u32 last_tx, last_std_rx, last_mini_rx; #endif - int pci_using_dac; u8 firmware_major; u8 firmware_minor; u8 firmware_fix; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index d75d95a97dd9..993b2fb42961 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rxdescmem_busaddr = dma_res->start; } else { + ret = -ENODEV; goto err_free_netdev; } - if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) { dma_set_coherent_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)); - else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) + } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) { dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); - else + } else { + ret = -EIO; goto err_free_netdev; + } /* MAC address space */ ret = request_and_map(pdev, "control_port", &control_port, diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index f5ec35fa4c63..466ad9470d1f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -48,6 +48,11 @@ enum ena_admin_aq_feature_id { ENA_ADMIN_FEATURES_OPCODE_NUM = 32, }; +/* device capabilities */ +enum ena_admin_aq_caps_id { + ENA_ADMIN_ENI_STATS = 0, +}; + enum ena_admin_placement_policy_type { /* descriptors and headers are in host memory */ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, @@ -455,7 +460,10 @@ struct ena_admin_device_attr_feature_desc { */ u32 supported_features; - u32 reserved3; + /* bitmap of ena_admin_aq_caps_id, which represents device + * capabilities. + */ + u32 capabilities; /* Indicates how many bits are used physical address access. 
*/ u32 phys_addr_width; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index ab413fc1f68e..8c8b4c88c7de 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -1971,6 +1971,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, sizeof(get_resp.u.dev_attr)); ena_dev->supported_features = get_resp.u.dev_attr.supported_features; + ena_dev->capabilities = get_resp.u.dev_attr.capabilities; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { rc = ena_com_get_feature(ena_dev, &get_resp, @@ -2223,6 +2224,13 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, struct ena_com_stats_ctx ctx; int ret; + if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { + netdev_err(ena_dev->net_device, + "Capability %d isn't supported\n", + ENA_ADMIN_ENI_STATS); + return -EOPNOTSUPP; + } + memset(&ctx, 0x0, sizeof(ctx)); ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); if (likely(ret == 0)) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 73b03ce59412..3c5081d9d25d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -314,6 +314,7 @@ struct ena_com_dev { struct ena_rss rss; u32 supported_features; + u32 capabilities; u32 dma_addr_bits; struct ena_host_attribute host_attr; @@ -967,6 +968,18 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d ena_dev->adaptive_coalescing = false; } +/* ena_com_get_cap - query whether device supports a capability. + * @ena_dev: ENA communication layer struct + * @cap_id: enum value representing the capability + * + * @return - true if capability is supported or false otherwise + */ +static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev, + enum ena_admin_aq_caps_id cap_id) +{ + return !!(ena_dev->capabilities & BIT(cap_id)); +} + /* ena_com_update_intr_reg - Prepare interrupt register * @intr_reg: interrupt register to update. 
* @rx_delay_interval: Rx interval in usecs diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 13e745cf3781..39242c5a1729 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -82,7 +82,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(rx_copybreak_pkt), ENA_STAT_RX_ENTRY(csum_good), ENA_STAT_RX_ENTRY(refil_partial), - ENA_STAT_RX_ENTRY(bad_csum), + ENA_STAT_RX_ENTRY(csum_bad), ENA_STAT_RX_ENTRY(page_alloc_fail), ENA_STAT_RX_ENTRY(skb_alloc_fail), ENA_STAT_RX_ENTRY(dma_mapping_err), @@ -110,8 +110,7 @@ static const struct ena_stats ena_stats_ena_com_strings[] = { #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) -#define ENA_STATS_ARRAY_ENI(adapter) \ - (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported) +#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings) static void ena_safe_update_stat(u64 *src, u64 *dst, struct u64_stats_sync *syncp) @@ -213,8 +212,9 @@ static void ena_get_ethtool_stats(struct net_device *netdev, u64 *data) { struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_com_dev *dev = adapter->ena_dev; - ena_get_stats(adapter, data, adapter->eni_stats_supported); + ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)); } static int ena_get_sw_stats_count(struct ena_adapter *adapter) @@ -226,7 +226,9 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter) static int ena_get_hw_stats_count(struct ena_adapter *adapter) { - return ENA_STATS_ARRAY_ENI(adapter); + bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS); + + return ENA_STATS_ARRAY_ENI(adapter) * supported; } int ena_get_sset_count(struct net_device *netdev, int sset) @@ -316,10 +318,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev, u8 *data) { struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_com_dev *dev = adapter->ena_dev; switch (sset) { case ETH_SS_STATS: - ena_get_strings(adapter, data, adapter->eni_stats_supported); + ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)); break; } } @@ -465,7 +468,9 @@ static void ena_get_drvinfo(struct net_device *dev, } static void ena_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(netdev); @@ -476,7 +481,9 @@ static void ena_get_ringparam(struct net_device *netdev, } static int ena_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(netdev); u32 new_tx_size, new_rx_size; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 7d5d885d85d5..53080fd143dc 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -103,7 +103,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) return; - adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD; + ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD); 
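/*
 * Hedged note on the conversion above, which repeats across this ENA
 * series: open-coded pairs of "adapter->reset_reason = ...;" and
 * "set_bit(ENA_FLAG_TRIGGER_RESET, ...)" -- only some of which issued
 * smp_mb__before_atomic() in between -- are folded into the
 * ena_reset_device() helper added to ena_netdev.h further down. The
 * barrier is the point of the helper: the reason is published before
 * the flag. A sketch of the consumer side this ordering protects;
 * example_reset_task() and do_reset() are illustrative names, not the
 * driver's:
 *
 *	static void example_reset_task(struct ena_adapter *adapter)
 *	{
 *		// test_and_clear_bit() is a value-returning atomic RMW
 *		// and fully ordered, so once the flag is observed the
 *		// reset_reason stored before it is observed as well
 *		if (!test_and_clear_bit(ENA_FLAG_TRIGGER_RESET,
 *					&adapter->flags))
 *			return;
 *		do_reset(adapter, adapter->reset_reason);
 *	}
 */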
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); netif_err(adapter, tx_err, dev, "Transmit time out\n"); @@ -166,11 +166,9 @@ static int ena_xmit_common(struct net_device *dev, "Failed to prepare tx bufs\n"); ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp); - if (rc != -ENOMEM) { - adapter->reset_reason = - ENA_REGS_RESET_DRIVER_INVALID_STATE; - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); - } + if (rc != -ENOMEM) + ena_reset_device(adapter, + ENA_REGS_RESET_DRIVER_INVALID_STATE); return rc; } @@ -434,7 +432,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) xdp_stat = &rx_ring->rx_stats.xdp_pass; break; default: - bpf_warn_invalid_xdp_action(verdict); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_invalid; } @@ -1269,45 +1267,39 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, netif_err(ring->adapter, tx_done, ring->netdev, - "tx_info doesn't have valid %s", - is_xdp ? "xdp frame" : "skb"); + "tx_info doesn't have valid %s. qid %u req_id %u", + is_xdp ? "xdp frame" : "skb", ring->qid, req_id); else netif_err(ring->adapter, tx_done, ring->netdev, - "Invalid req_id: %hu\n", - req_id); + "Invalid req_id %u in qid %u\n", + req_id, ring->qid); ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); + ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID); - /* Trigger device reset */ - ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; - set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags); return -EFAULT; } static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) { - struct ena_tx_buffer *tx_info = NULL; + struct ena_tx_buffer *tx_info; - if (likely(req_id < tx_ring->ring_size)) { - tx_info = &tx_ring->tx_buffer_info[req_id]; - if (likely(tx_info->skb)) - return 0; - } + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (likely(tx_info->skb)) + return 0; return handle_invalid_req_id(tx_ring, req_id, tx_info, false); } static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id) { - struct ena_tx_buffer *tx_info = NULL; + struct ena_tx_buffer *tx_info; - if (likely(req_id < xdp_ring->ring_size)) { - tx_info = &xdp_ring->tx_buffer_info[req_id]; - if (likely(tx_info->xdpf)) - return 0; - } + tx_info = &xdp_ring->tx_buffer_info[req_id]; + if (likely(tx_info->xdpf)) + return 0; return handle_invalid_req_id(xdp_ring, req_id, tx_info, true); } @@ -1332,9 +1324,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); - if (rc) + if (rc) { + if (unlikely(rc == -EINVAL)) + handle_invalid_req_id(tx_ring, req_id, NULL, + false); break; + } + /* validate that the request id points to a valid skb */ rc = validate_tx_req_id(tx_ring, req_id); if (rc) break; @@ -1427,6 +1424,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, u16 *next_to_clean) { struct ena_rx_buffer *rx_info; + struct ena_adapter *adapter; u16 len, req_id, buf = 0; struct sk_buff *skb; void *page_addr; @@ -1439,8 +1437,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, rx_info = &rx_ring->rx_buffer_info[req_id]; if (unlikely(!rx_info->page)) { - netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, - "Page is NULL\n"); + adapter = rx_ring->adapter; + netif_err(adapter, rx_err, rx_ring->netdev, + "Page is NULL. 
qid %u req_id %u\n", rx_ring->qid, req_id); + ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); + ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID); return NULL; } @@ -1550,7 +1551,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, (ena_rx_ctx->l3_csum_err))) { /* ipv4 checksum error */ skb->ip_summed = CHECKSUM_NONE; - ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, + ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX IPv4 header checksum error\n"); @@ -1562,7 +1563,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { if (unlikely(ena_rx_ctx->l4_csum_err)) { /* TCP/UDP checksum error */ - ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, + ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX L4 checksum error\n"); @@ -1773,15 +1774,12 @@ error: if (rc == -ENOSPC) { ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); - adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; + ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); } else { ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); - adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID); } - - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); - return 0; } @@ -1896,9 +1894,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq, &req_id); - if (rc) + if (rc) { + if (unlikely(rc == -EINVAL)) + handle_invalid_req_id(xdp_ring, req_id, NULL, + true); break; + } + /* validate that the request id points to a valid xdp_frame */ rc = validate_xdp_req_id(xdp_ring, req_id); if (rc) break; @@ -3240,11 +3243,11 @@ err: int ena_update_hw_stats(struct ena_adapter *adapter) { - int rc = 0; + int rc; rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); if (rc) { - dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n"); + netdev_err(adapter->netdev, "Failed to get ENI stats\n"); return rc; } @@ -3641,8 +3644,6 @@ static int ena_restore_device(struct ena_adapter *adapter) mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); adapter->last_keep_alive_jiffies = jiffies; - dev_err(&pdev->dev, "Device reset completed successfully\n"); - return rc; err_disable_msix: ena_free_mgmnt_irq(adapter); @@ -3672,6 +3673,8 @@ static void ena_fw_reset_device(struct work_struct *work) if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { ena_destroy_device(adapter, false); ena_restore_device(adapter); + + dev_err(&adapter->pdev->dev, "Device reset completed successfully\n"); } rtnl_unlock(); @@ -3694,9 +3697,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, netif_err(adapter, rx_err, adapter->netdev, "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", rx_ring->qid); - adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; - smp_mb__before_atomic(); - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + + ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); return -EIO; } @@ -3733,9 +3735,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, netif_err(adapter, tx_err, adapter->netdev, "Potential MSIX issue on Tx side Queue = %d. 
Reset the device\n", tx_ring->qid); - adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; - smp_mb__before_atomic(); - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); return -EIO; } @@ -3761,9 +3761,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", missed_tx, adapter->missing_tx_completion_threshold); - adapter->reset_reason = - ENA_REGS_RESET_MISS_TX_CMPL; - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL); rc = -EIO; } @@ -3884,8 +3882,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) "Keep alive watchdog timeout.\n"); ena_increase_stat(&adapter->dev_stats.wd_expired, 1, &adapter->syncp); - adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); } } @@ -3896,8 +3893,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) "ENA admin queue is not in running state!\n"); ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, &adapter->syncp); - adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO); } } @@ -4013,10 +4009,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); /* 1 IRQ for mgmnt and 1 IRQs for each IO direction */ max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); - if (unlikely(!max_num_io_queues)) { - dev_err(&pdev->dev, "The device doesn't have io queues\n"); - return -EFAULT; - } return max_num_io_queues; } @@ -4101,7 +4093,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter) val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); rc = ena_com_indirect_table_fill_entry(ena_dev, i, ENA_IO_RXQ_IDX(val)); - if (unlikely(rc && (rc != -EOPNOTSUPP))) { + if (unlikely(rc)) { dev_err(dev, "Cannot fill indirect table\n"); goto err_fill_indir; } @@ -4137,10 +4129,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) } -static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) +static void ena_calc_io_queue_size(struct ena_adapter *adapter, + struct ena_com_dev_get_features_ctx *get_feat_ctx) { - struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; - struct ena_com_dev *ena_dev = ctx->ena_dev; + struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq; + struct ena_com_dev *ena_dev = adapter->ena_dev; u32 tx_queue_size = ENA_DEFAULT_RING_SIZE; u32 rx_queue_size = ENA_DEFAULT_RING_SIZE; u32 max_tx_queue_size; @@ -4148,7 +4141,7 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = - &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; + &get_feat_ctx->max_queue_ext.max_queue_ext; max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, max_queue_ext->max_rx_sq_depth); max_tx_queue_size = max_queue_ext->max_tx_cq_depth; @@ -4160,13 +4153,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) max_tx_queue_size = min_t(u32, max_tx_queue_size, max_queue_ext->max_tx_sq_depth); - ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, - 
max_queue_ext->max_per_packet_tx_descs); - ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, - max_queue_ext->max_per_packet_rx_descs); + adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + max_queue_ext->max_per_packet_tx_descs); + adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + max_queue_ext->max_per_packet_rx_descs); } else { struct ena_admin_queue_feature_desc *max_queues = - &ctx->get_feat_ctx->max_queues; + &get_feat_ctx->max_queues; max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, max_queues->max_sq_depth); max_tx_queue_size = max_queues->max_cq_depth; @@ -4178,10 +4171,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) max_tx_queue_size = min_t(u32, max_tx_queue_size, max_queues->max_sq_depth); - ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, - max_queues->max_packet_tx_descs); - ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, - max_queues->max_packet_rx_descs); + adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + max_queues->max_packet_tx_descs); + adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + max_queues->max_packet_rx_descs); } max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size); @@ -4195,12 +4188,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) tx_queue_size = rounddown_pow_of_two(tx_queue_size); rx_queue_size = rounddown_pow_of_two(rx_queue_size); - ctx->max_tx_queue_size = max_tx_queue_size; - ctx->max_rx_queue_size = max_rx_queue_size; - ctx->tx_queue_size = tx_queue_size; - ctx->rx_queue_size = rx_queue_size; - - return 0; + adapter->max_tx_ring_size = max_tx_queue_size; + adapter->max_rx_ring_size = max_rx_queue_size; + adapter->requested_tx_ring_size = tx_queue_size; + adapter->requested_rx_ring_size = rx_queue_size; } /* ena_probe - Device Initialization Routine @@ -4215,7 +4206,6 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) */ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct ena_calc_queue_size_ctx calc_queue_ctx = {}; struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev *ena_dev = NULL; struct ena_adapter *adapter; @@ -4300,10 +4290,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_device_destroy; } - calc_queue_ctx.ena_dev = ena_dev; - calc_queue_ctx.get_feat_ctx = &get_feat_ctx; - calc_queue_ctx.pdev = pdev; - /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. 
* Updated during device initialization with the real granularity */ @@ -4311,8 +4297,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx); - rc = ena_calc_io_queue_size(&calc_queue_ctx); - if (rc || !max_num_io_queues) { + ena_calc_io_queue_size(adapter, &get_feat_ctx); + if (unlikely(!max_num_io_queues)) { rc = -EFAULT; goto err_device_destroy; } @@ -4321,13 +4307,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->reset_reason = ENA_REGS_RESET_NORMAL; - adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; - adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size; - adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; - adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; - adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; - adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; - adapter->num_io_queues = max_num_io_queues; adapter->max_num_io_queues = max_num_io_queues; adapter->last_monitored_tx_qid = 0; @@ -4378,11 +4357,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_config_debug_area(adapter); - if (!ena_update_hw_stats(adapter)) - adapter->eni_stats_supported = true; - else - adapter->eni_stats_supported = false; - memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 0c39fc2fa345..1bdce99bf688 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -14,6 +14,7 @@ #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <uapi/linux/bpf.h> #include "ena_com.h" #include "ena_eth_com.h" @@ -139,18 +140,6 @@ struct ena_napi { struct dim dim; }; -struct ena_calc_queue_size_ctx { - struct ena_com_dev_get_features_ctx *get_feat_ctx; - struct ena_com_dev *ena_dev; - struct pci_dev *pdev; - u32 tx_queue_size; - u32 rx_queue_size; - u32 max_tx_queue_size; - u32 max_rx_queue_size; - u16 max_tx_sgl_size; - u16 max_rx_sgl_size; -}; - struct ena_tx_buffer { struct sk_buff *skb; /* num of ena desc for this specific skb @@ -215,7 +204,7 @@ struct ena_stats_rx { u64 rx_copybreak_pkt; u64 csum_good; u64 refil_partial; - u64 bad_csum; + u64 csum_bad; u64 page_alloc_fail; u64 skb_alloc_fail; u64 dma_mapping_err; @@ -378,7 +367,6 @@ struct ena_adapter { struct u64_stats_sync syncp; struct ena_stats_dev dev_stats; struct ena_admin_eni_stats eni_stats; - bool eni_stats_supported; /* last queue index that was checked for uncompleted tx packets */ u32 last_monitored_tx_qid; @@ -406,6 +394,15 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count); int ena_get_sset_count(struct net_device *netdev, int sset); +static inline void ena_reset_device(struct ena_adapter *adapter, + enum ena_regs_reset_reason_types reset_reason) +{ + adapter->reset_reason = reset_reason; + /* Make sure reset reason is set before triggering the reset */ + smp_mb__before_atomic(); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); +} + enum ena_xdp_errors_t { ENA_XDP_ALLOWED = 0, ENA_XDP_CURRENT_MTU_TOO_LARGE, diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 
2f808dbc8b0e..3a351d3396bf 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -680,6 +680,7 @@ static int a2065_init_one(struct zorro_dev *z, unsigned long base_addr = board + A2065_LANCE; unsigned long mem_start = board + A2065_RAM; struct resource *r1, *r2; + u8 addr[ETH_ALEN]; u32 serial; int err; @@ -706,17 +707,18 @@ static int a2065_init_one(struct zorro_dev *z, r2->name = dev->name; serial = be32_to_cpu(z->rom.er_SerialNumber); - dev->dev_addr[0] = 0x00; + addr[0] = 0x00; if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */ - dev->dev_addr[1] = 0x80; - dev->dev_addr[2] = 0x10; + addr[1] = 0x80; + addr[2] = 0x10; } else { /* Ameristar */ - dev->dev_addr[1] = 0x00; - dev->dev_addr[2] = 0x9f; + addr[1] = 0x00; + addr[2] = 0x9f; } - dev->dev_addr[3] = (serial >> 16) & 0xff; - dev->dev_addr[4] = (serial >> 8) & 0xff; - dev->dev_addr[5] = serial & 0xff; + addr[3] = (serial >> 16) & 0xff; + addr[4] = (serial >> 8) & 0xff; + addr[5] = serial & 0xff; + eth_hw_addr_set(dev, addr); dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr); dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start); dev->mem_end = dev->mem_start + A2065_RAM_SIZE; diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index 5e0f645f5bde..4ea7b9f3c424 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -441,11 +441,11 @@ static int ariadne_open(struct net_device *dev) /* Set the Ethernet Hardware Address */ lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */ - lance->RDP = ((u_short *)&dev->dev_addr[0])[0]; + lance->RDP = ((const u_short *)&dev->dev_addr[0])[0]; lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */ - lance->RDP = ((u_short *)&dev->dev_addr[0])[1]; + lance->RDP = ((const u_short *)&dev->dev_addr[0])[1]; lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */ - lance->RDP = ((u_short *)&dev->dev_addr[0])[2]; + lance->RDP = ((const u_short *)&dev->dev_addr[0])[2]; /* Set the Init Block Mode */ lance->RAP = CSR15; /* Mode Register */ @@ -717,6 +717,7 @@ static int ariadne_init_one(struct zorro_dev *z, unsigned long mem_start = board + ARIADNE_RAM; struct resource *r1, *r2; struct net_device *dev; + u8 addr[ETH_ALEN]; u32 serial; int err; @@ -740,12 +741,13 @@ static int ariadne_init_one(struct zorro_dev *z, r2->name = dev->name; serial = be32_to_cpu(z->rom.er_SerialNumber); - dev->dev_addr[0] = 0x00; - dev->dev_addr[1] = 0x60; - dev->dev_addr[2] = 0x30; - dev->dev_addr[3] = (serial >> 16) & 0xff; - dev->dev_addr[4] = (serial >> 8) & 0xff; - dev->dev_addr[5] = serial & 0xff; + addr[0] = 0x00; + addr[1] = 0x60; + addr[2] = 0x30; + addr[3] = (serial >> 16) & 0xff; + addr[4] = (serial >> 8) & 0xff; + addr[5] = serial & 0xff; + eth_hw_addr_set(dev, addr); dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr); dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start); dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE; diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 9c7d9690d00c..27869164c6e6 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -471,6 +471,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, int i; static int did_version; unsigned short save1, save2; + u8 addr[ETH_ALEN]; PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n", (long)memaddr, (long)ioaddr )); @@ -585,14 +586,16 @@ static unsigned long __init lance_probe1( struct net_device *dev, 
eth_hw_addr_set(dev, OldRieblDefHwaddr); break; case NEW_RIEBL: - lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN); + lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN); + eth_hw_addr_set(dev, addr); break; case PAM_CARD: i = IO->eeprom; for( i = 0; i < 6; ++i ) - dev->dev_addr[i] = + addr[i] = ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) | ((((unsigned short *)MEM)[i*2+1] & 0x0f)); + eth_hw_addr_set(dev, addr); i = IO->mem; break; } diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c index 6784f8748638..055fda11c572 100644 --- a/drivers/net/ethernet/amd/hplance.c +++ b/drivers/net/ethernet/amd/hplance.c @@ -129,6 +129,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d) { unsigned long va = (d->resource.start + DIO_VIRADDRBASE); struct hplance_private *lp; + u8 addr[ETH_ALEN]; int i; /* reset the board */ @@ -144,9 +145,10 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d) /* The NVRAM holds our ethernet address, one nibble per byte, * at bytes NVRAMOFF+1,3,5,7,9... */ - dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4) + addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4) | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF); } + eth_hw_addr_set(dev, addr); lp = netdev_priv(dev); lp->lance.name = d->name; diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 945bf1d87507..462016666752 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -480,6 +480,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int unsigned long flags; int err = -ENOMEM; void __iomem *bios; + u8 addr[ETH_ALEN]; /* First we look for special cases. Check for HP's on-board ethernet by looking for 'HP' in the BIOS. @@ -541,7 +542,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int /* There is a 16 byte station address PROM at the base address. The first six bytes are the station address. 
*/ for (i = 0; i < 6; i++) - dev->dev_addr[i] = inb(ioaddr + i); + addr[i] = inb(ioaddr + i); + eth_hw_addr_set(dev, addr); printk("%pM", dev->dev_addr); dev->base_addr = ioaddr; diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index da97fccea9ea..410c7b67eba4 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -74,6 +74,7 @@ static struct net_device * __init mvme147lance_probe(void) static int called; static const char name[] = "MVME147 LANCE"; struct m147lance_private *lp; + u8 macaddr[ETH_ALEN]; u_long *addr; u_long address; int err; @@ -93,15 +94,16 @@ static struct net_device * __init mvme147lance_probe(void) addr = (u_long *)ETHERNET_ADDRESS; address = *addr; - dev->dev_addr[0] = 0x08; - dev->dev_addr[1] = 0x00; - dev->dev_addr[2] = 0x3e; + macaddr[0] = 0x08; + macaddr[1] = 0x00; + macaddr[2] = 0x3e; address = address >> 8; - dev->dev_addr[5] = address&0xff; + macaddr[5] = address&0xff; address = address >> 8; - dev->dev_addr[4] = address&0xff; + macaddr[4] = address&0xff; address = address >> 8; - dev->dev_addr[3] = address&0xff; + macaddr[3] = address&0xff; + eth_hw_addr_set(dev, macaddr); printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n", dev->name, dev->base_addr, MVME147_LANCE_IRQ, diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 032e8922b482..8ba579b89b75 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -251,7 +251,7 @@ static void ni65_recv_intr(struct net_device *dev,int); static void ni65_xmit_intr(struct net_device *dev,int); static int ni65_open(struct net_device *dev); static int ni65_lance_reinit(struct net_device *dev); -static void ni65_init_lance(struct priv *p,unsigned char*,int,int); +static void ni65_init_lance(struct priv *p,const unsigned char*,int,int); static netdev_tx_t ni65_send_packet(struct sk_buff *skb, struct net_device *dev); static void ni65_timeout(struct net_device *dev, unsigned int txqueue); @@ -418,6 +418,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) { int i,j; struct priv *p; + u8 addr[ETH_ALEN]; unsigned long flags; dev->irq = irq; @@ -444,7 +445,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) return -ENODEV; for(j=0;j<6;j++) - dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j); + addr[j] = inb(ioaddr+cards[i].addr_offset+j); + eth_hw_addr_set(dev, addr); if( (j=ni65_alloc_buffer(dev)) < 0) { release_region(ioaddr, cards[i].total_size); @@ -566,7 +568,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) /* * set lance register and trigger init */ -static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode) +static void ni65_init_lance(struct priv *p,const unsigned char *daddr,int filter,int mode) { int i; u32 pib; diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index f5c50ff377ff..c20c369c7eb8 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -860,7 +860,9 @@ static int pcnet32_nway_reset(struct net_device *dev) } static void pcnet32_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct pcnet32_private *lp = netdev_priv(dev); @@ -871,7 +873,9 @@ static void pcnet32_get_ringparam(struct net_device *dev, } static int pcnet32_set_ringparam(struct net_device *dev, - struct 
ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct pcnet32_private *lp = netdev_priv(dev); unsigned long flags; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 533b8519ec35..466273b22f0a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -898,6 +898,8 @@ #define PCS_V2_WINDOW_SELECT 0x9064 #define PCS_V2_RV_WINDOW_DEF 0x1060 #define PCS_V2_RV_WINDOW_SELECT 0x1064 +#define PCS_V2_YC_WINDOW_DEF 0x18060 +#define PCS_V2_YC_WINDOW_SELECT 0x18064 /* PCS register entry bit positions and sizes */ #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 @@ -1030,8 +1032,8 @@ #define XP_PROP_0_PORT_ID_WIDTH 8 #define XP_PROP_0_PORT_MODE_INDEX 8 #define XP_PROP_0_PORT_MODE_WIDTH 4 -#define XP_PROP_0_PORT_SPEEDS_INDEX 23 -#define XP_PROP_0_PORT_SPEEDS_WIDTH 4 +#define XP_PROP_0_PORT_SPEEDS_INDEX 22 +#define XP_PROP_0_PORT_SPEEDS_WIDTH 5 #define XP_PROP_1_MAX_RX_DMA_INDEX 24 #define XP_PROP_1_MAX_RX_DMA_WIDTH 5 #define XP_PROP_1_MAX_RX_QUEUES_INDEX 8 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 30d24d19f40d..492ac383f16d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1508,9 +1508,6 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) - return -EINVAL; - mac_tscr = 0; switch (config.tx_type) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 94879cf8b420..6ceb1cdf6eba 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -619,8 +619,11 @@ static int xgbe_get_module_eeprom(struct net_device *netdev, return pdata->phy_if.module_eeprom(pdata, eeprom, data); } -static void xgbe_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ringparam) +static void +xgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *kernel_ringparam, + struct netlink_ext_ack *extack) { struct xgbe_prv_data *pdata = netdev_priv(netdev); @@ -631,7 +634,9 @@ static void xgbe_get_ringparam(struct net_device *netdev, } static int xgbe_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ringparam) + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *kernel_ringparam, + struct netlink_ext_ack *extack) { struct xgbe_prv_data *pdata = netdev_priv(netdev); unsigned int rx, tx; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 90cb55eb5466..efdcf484a510 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -278,6 +278,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) { pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; + } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) && + (rdev->device == 0x14b5)) { + pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT; + + /* Yellow Carp devices do not need cdr workaround */ + pdata->vdata->an_cdr_workaround = 0; } else { 
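/*
 * Hedged context for the branch above: these cases only choose where
 * the XPCS window registers live for each SoC -- Renoir (0x15d0),
 * Yellow Carp (0x14b5), or the V2 defaults below -- while the access
 * scheme itself is unchanged. XPCS registers sit behind a sliding
 * window: write a page index to xpcs_window_sel_reg, then access the
 * offset inside the fixed aperture, roughly:
 *
 *	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg,
 *		       reg & ~pdata->xpcs_window_mask);
 *	val = XPCS16_IOREAD(pdata, pdata->xpcs_window +
 *				   (reg & pdata->xpcs_window_mask));
 *
 * (The mask and aperture are derived from xpcs_window_def_reg at probe
 * time; the two calls compress the driver's helpers and are a sketch,
 * not the exact code.)
 */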
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; @@ -460,7 +467,7 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev) return ret; } -static const struct xgbe_version_data xgbe_v2a = { +static struct xgbe_version_data xgbe_v2a = { .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, .xpcs_access = XGBE_XPCS_ACCESS_V2, .mmc_64bit = 1, @@ -475,7 +482,7 @@ static const struct xgbe_version_data xgbe_v2a = { .an_cdr_workaround = 1, }; -static const struct xgbe_version_data xgbe_v2b = { +static struct xgbe_version_data xgbe_v2b = { .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, .xpcs_access = XGBE_XPCS_ACCESS_V2, .mmc_64bit = 1, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 213769054391..2156600641b6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -124,10 +124,10 @@ #include "xgbe.h" #include "xgbe-common.h" -#define XGBE_PHY_PORT_SPEED_100 BIT(0) -#define XGBE_PHY_PORT_SPEED_1000 BIT(1) -#define XGBE_PHY_PORT_SPEED_2500 BIT(2) -#define XGBE_PHY_PORT_SPEED_10000 BIT(3) +#define XGBE_PHY_PORT_SPEED_100 BIT(1) +#define XGBE_PHY_PORT_SPEED_1000 BIT(2) +#define XGBE_PHY_PORT_SPEED_2500 BIT(3) +#define XGBE_PHY_PORT_SPEED_10000 BIT(4) #define XGBE_MUTEX_RELEASE 0x80000000 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 220dc42af31a..ff2d099aab21 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -869,7 +869,7 @@ static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue) for (i = 0; i < pdata->txq_cnt; i++) { txq = netdev_get_tx_queue(ndev, i); - txq->trans_start = jiffies; + txq_trans_cond_update(txq); netif_tx_start_queue(txq); } } diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 95d3061c61be..8fcaf1639920 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -92,7 +92,7 @@ static void mace_reset(struct net_device *dev); static irqreturn_t mace_interrupt(int irq, void *dev_id); static irqreturn_t mace_dma_intr(int irq, void *dev_id); static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue); -static void __mace_set_address(struct net_device *dev, void *addr); +static void __mace_set_address(struct net_device *dev, const void *addr); /* * Load a receive DMA channel with a base address and ring length @@ -197,6 +197,7 @@ static int mace_probe(struct platform_device *pdev) unsigned char *addr; struct net_device *dev; unsigned char checksum = 0; + u8 macaddr[ETH_ALEN]; int err; dev = alloc_etherdev(PRIV_BYTES); @@ -229,8 +230,9 @@ static int mace_probe(struct platform_device *pdev) for (j = 0; j < 6; ++j) { u8 v = bitrev8(addr[j<<4]); checksum ^= v; - dev->dev_addr[j] = v; + macaddr[j] = v; } + eth_hw_addr_set(dev, macaddr); for (; j < 8; ++j) { checksum ^= bitrev8(addr[j<<4]); } @@ -315,11 +317,12 @@ static void mace_reset(struct net_device *dev) * Load the address on a mace controller. 
*/ -static void __mace_set_address(struct net_device *dev, void *addr) +static void __mace_set_address(struct net_device *dev, const void *addr) { struct mace_data *mp = netdev_priv(dev); volatile struct mace *mb = mp->mace; - unsigned char *p = addr; + const unsigned char *p = addr; + u8 macaddr[ETH_ALEN]; int i; /* load up the hardware address */ @@ -331,7 +334,8 @@ static void __mace_set_address(struct net_device *dev, void *addr) ; } for (i = 0; i < 6; ++i) - mb->padr = dev->dev_addr[i] = p[i]; + mb->padr = macaddr[i] = p[i]; + eth_hw_addr_set(dev, macaddr); if (mp->chipid != BROKEN_ADDRCHG_REV) mb->iac = 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index 23b2d390fcdd..ace691d7cd75 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -40,10 +40,12 @@ #define AQ_DEVICE_ID_AQC113DEV 0x00C0 #define AQ_DEVICE_ID_AQC113CS 0x94C0 +#define AQ_DEVICE_ID_AQC113CA 0x34C0 #define AQ_DEVICE_ID_AQC114CS 0x93C0 #define AQ_DEVICE_ID_AQC113 0x04C0 #define AQ_DEVICE_ID_AQC113C 0x14C0 #define AQ_DEVICE_ID_AQC115C 0x12C0 +#define AQ_DEVICE_ID_AQC116C 0x11C0 #define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter" @@ -53,20 +55,19 @@ #define AQ_NIC_RATE_10G BIT(0) #define AQ_NIC_RATE_5G BIT(1) -#define AQ_NIC_RATE_5GSR BIT(2) -#define AQ_NIC_RATE_2G5 BIT(3) -#define AQ_NIC_RATE_1G BIT(4) -#define AQ_NIC_RATE_100M BIT(5) -#define AQ_NIC_RATE_10M BIT(6) -#define AQ_NIC_RATE_1G_HALF BIT(7) -#define AQ_NIC_RATE_100M_HALF BIT(8) -#define AQ_NIC_RATE_10M_HALF BIT(9) +#define AQ_NIC_RATE_2G5 BIT(2) +#define AQ_NIC_RATE_1G BIT(3) +#define AQ_NIC_RATE_100M BIT(4) +#define AQ_NIC_RATE_10M BIT(5) +#define AQ_NIC_RATE_1G_HALF BIT(6) +#define AQ_NIC_RATE_100M_HALF BIT(7) +#define AQ_NIC_RATE_10M_HALF BIT(8) -#define AQ_NIC_RATE_EEE_10G BIT(10) -#define AQ_NIC_RATE_EEE_5G BIT(11) -#define AQ_NIC_RATE_EEE_2G5 BIT(12) -#define AQ_NIC_RATE_EEE_1G BIT(13) -#define AQ_NIC_RATE_EEE_100M BIT(14) +#define AQ_NIC_RATE_EEE_10G BIT(9) +#define AQ_NIC_RATE_EEE_5G BIT(10) +#define AQ_NIC_RATE_EEE_2G5 BIT(11) +#define AQ_NIC_RATE_EEE_1G BIT(12) +#define AQ_NIC_RATE_EEE_100M BIT(13) #define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\ AQ_NIC_RATE_EEE_5G |\ AQ_NIC_RATE_EEE_2G5 |\ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a9ef0544e30f..a418238f6309 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -812,7 +812,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev, } static void aq_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg; @@ -827,7 +829,9 @@ static void aq_get_ringparam(struct net_device *ndev, } static int aq_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct aq_nic_s *aq_nic = netdev_priv(ndev); const struct aq_hw_caps_s *hw_caps; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 062a300a566a..dbd284660135 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ 
b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -80,6 +80,8 @@ struct aq_hw_link_status_s { }; struct aq_stats_s { + u64 brc; + u64 btc; u64 uprc; u64 mprc; u64 bprc; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index e22935ce9573..e65ce7199dac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -231,9 +231,6 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, struct hwtstamp_config *config) { - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 1acf544afeb4..33f1a1377588 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self) aq_macsec_init(self); #endif - mutex_lock(&self->fwreq_mutex); - err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr); - mutex_unlock(&self->fwreq_mutex); - if (err) - goto err_exit; + if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) { + // If DT has none or an invalid one, ask device for MAC address + mutex_lock(&self->fwreq_mutex); + err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr); + mutex_unlock(&self->fwreq_mutex); - eth_hw_addr_set(self->ndev, addr); + if (err) + goto err_exit; - if (!is_valid_ether_addr(self->ndev->dev_addr) || - !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) { - netdev_warn(self->ndev, "MAC is invalid, will use random."); - eth_hw_addr_random(self->ndev); + if (is_valid_ether_addr(addr) && + aq_nic_is_valid_ether_addr(addr)) { + eth_hw_addr_set(self->ndev, addr); + } else { + netdev_warn(self->ndev, "MAC is invalid, will use random."); + eth_hw_addr_random(self->ndev); + } } #if defined(AQ_CFG_MAC_ADDR_PERMANENT) @@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) data[++i] = stats->mbtc; data[++i] = stats->bbrc; data[++i] = stats->bbtc; - data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; - data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; + if (stats->brc) + data[++i] = stats->brc; + else + data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; + if (stats->btc) + data[++i] = stats->btc; + else + data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; data[++i] = stats->dma_pkt_rc; data[++i] = stats->dma_pkt_tc; data[++i] = stats->dma_oct_rc; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index d4b1976ee69b..797a95142d1f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = { { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), }, { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), }, { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), }, + { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), }, {} }; @@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = { { AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, { AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, { AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, - { 
AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, + { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, }, + { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, }, + { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, }, + }; MODULE_DEVICE_TABLE(pci, aq_pci_tbl); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 24122ccda614..77e76c9efd32 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -298,13 +298,14 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } } - if (unlikely(buff->is_eop)) { + if (unlikely(buff->is_eop && buff->skb)) { u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; u64_stats_update_end(&self->stats.tx.syncp); dev_kfree_skb_any(buff->skb); + buff->skb = NULL; } buff->pa = 0U; buff->eop_index = 0xffffU; @@ -365,6 +366,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, if (!buff->is_eop) { buff_ = buff; do { + if (buff_->next >= self->size) { + err = -EIO; + goto err_exit; + } next_ = buff_->next, buff_ = &self->buff_ring[next_]; is_rsc_completed = @@ -388,6 +393,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, (buff->is_lro && buff->is_cso_err)) { buff_ = buff; do { + if (buff_->next >= self->size) { + err = -EIO; + goto err_exit; + } next_ = buff_->next, buff_ = &self->buff_ring[next_]; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index d281322d7dd2..f4774cf051c9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u { unsigned int count; - WARN_ONCE(!aq_vec_is_valid_tc(self, tc), - "Invalid tc %u (#rx=%u, #tx=%u)\n", - tc, self->rx_rings, self->tx_rings); if (!aq_vec_is_valid_tc(self, tc)) return 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index fc0e66006644..7e88d7234b14 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, goto err_exit; if (fw.len == 0xFFFFU) { + if (sw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid sw len: %x\n", sw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_rpc_call(self, sw.len); if (err < 0) goto err_exit; @@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, if (rpc) { if (fw.len) { + if (fw.len > sizeof(self->rpc)) { + printk(KERN_INFO "Invalid fw len: %x\n", fw.len); + err = -EINVAL; + goto err_exit; + } err = hw_atl_utils_fw_downld_dwords(self, self->rpc_addr, @@ -857,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) int hw_atl_utils_update_stats(struct aq_hw_s *self) { struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; struct hw_atl_utils_mbox mbox; + bool corrupted_stats = false; hw_atl_utils_mpi_read_stats(self, &mbox); -#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \ - mbox.stats._N_ - self->last_stats._N_) +#define AQ_SDELTA(_N_) \ +do { \ + if (!corrupted_stats && \ + ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \ + curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \ 
+ else \ + corrupted_stats = true; \ +} while (0) if (self->aq_link_status.mbps) { AQ_SDELTA(uprc); @@ -882,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self) AQ_SDELTA(bbrc); AQ_SDELTA(bbtc); AQ_SDELTA(dpc); + + if (!corrupted_stats) + *cs = curr_stats; } #undef AQ_SDELTA diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index eac631c45c56..4d4cfbc91e19 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) if (speed & AQ_NIC_RATE_5G) rate |= FW2X_RATE_5G; - if (speed & AQ_NIC_RATE_5GSR) - rate |= FW2X_RATE_5G; - if (speed & AQ_NIC_RATE_2G5) rate |= FW2X_RATE_2G5; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index c98708bb044c..5dfc751572ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = { AQ_NIC_RATE_5G | AQ_NIC_RATE_2G5 | AQ_NIC_RATE_1G | - AQ_NIC_RATE_1G_HALF | AQ_NIC_RATE_100M | - AQ_NIC_RATE_100M_HALF | - AQ_NIC_RATE_10M | - AQ_NIC_RATE_10M_HALF, + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc115c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc116c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, }; static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h index de8723f1c28a..346f0dc9912e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h @@ -9,6 +9,8 @@ #include "aq_common.h" extern const struct aq_hw_caps_s hw_atl2_caps_aqc113; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c; extern const struct aq_hw_ops hw_atl2_ops; #endif /* HW_ATL2_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h index b66fa346581c..6bad64c77b87 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h @@ -239,7 +239,8 @@ struct version_s { u8 minor; u16 build; } phy; - u32 rsvd; + u32 drv_iface_ver:4; + u32 rsvd:28; }; struct link_status_s { @@ -424,7 +425,7 @@ struct cable_diag_status_s { u16 rsvd2; }; -struct statistics_s { +struct statistics_a0_s { struct { u32 link_up; u32 link_down; @@ -457,6 +458,33 @@ struct statistics_s { u32 reserve_fw_gap; }; +struct __packed statistics_b0_s { + u64 rx_good_octets; + u64 rx_pause_frames; + u64 rx_good_frames; + u64 rx_errors; + u64 rx_unicast_frames; + u64 rx_multicast_frames; + u64 rx_broadcast_frames; + + u64 tx_good_octets; + u64 tx_pause_frames; + u64 tx_good_frames; + u64 tx_errors; + u64 tx_unicast_frames; + u64 tx_multicast_frames; + u64 tx_broadcast_frames; + + u32 main_loop_cycles; +}; + 
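The reworked AQ_SDELTA macro above guards the firmware statistics deltas: each difference is sign-checked as s64, and the batch is committed only when no counter appears to have gone backwards. A minimal userspace sketch of the same pattern (accumulate_delta is a hypothetical name, not driver API):

#include <stdbool.h>
#include <stdint.h>

/* Fold (now - last) into *acc only when it is non-negative as s64;
 * a negative delta marks the whole snapshot as corrupted. */
static bool accumulate_delta(uint64_t *acc, uint64_t now, uint64_t last)
{
        int64_t delta = (int64_t)(now - last);

        if (delta < 0)
                return false;   /* caller discards the snapshot */
        *acc += (uint64_t)delta;
        return true;
}

Working on a scratch copy and assigning it back only on success, as the hunk does with curr_stats, keeps a half-applied snapshot from ever reaching readers.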
+struct __packed statistics_s { + union __packed { + struct statistics_a0_s a0; + struct statistics_b0_s b0; + }; +}; + struct filter_caps_s { u8 l2_filters_base_index:6; u8 flexible_filter_mask:2; @@ -545,7 +573,7 @@ struct management_status_s { u32 rsvd5; }; -struct fw_interface_out { +struct __packed fw_interface_out { struct transaction_counter_s transaction_id; struct version_s version; struct link_status_s link_status; @@ -569,7 +597,6 @@ struct fw_interface_out { struct core_dump_s core_dump; u32 rsvd11; struct statistics_s stats; - u32 rsvd12; struct filter_caps_s filter_caps; struct device_caps_s device_caps; u32 rsvd13; @@ -592,6 +619,9 @@ struct fw_interface_out { #define AQ_HOST_MODE_LOW_POWER 3U #define AQ_HOST_MODE_SHUTDOWN 4U +#define AQ_A2_FW_INTERFACE_A0 0 +#define AQ_A2_FW_INTERFACE_B0 1 + int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops); int hw_atl2_utils_soft_reset(struct aq_hw_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index dd259c8f2f4f..58d426dda3ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self, if (cnt > AQ_A2_FW_READ_TRY_MAX) return -ETIME; if (tid1.transaction_cnt_a != tid1.transaction_cnt_b) - udelay(1); + mdelay(1); } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b); hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords); @@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed, { link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G); link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G); - link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR); + link_options->rate_N5G = link_options->rate_5G; link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5); link_options->rate_N2P5G = link_options->rate_2P5G; link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G); @@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps) rate |= AQ_NIC_RATE_10G; if (lkp_link_caps->rate_5G) rate |= AQ_NIC_RATE_5G; - if (lkp_link_caps->rate_N5G) - rate |= AQ_NIC_RATE_5GSR; if (lkp_link_caps->rate_2P5G) rate |= AQ_NIC_RATE_2G5; if (lkp_link_caps->rate_1G) @@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac) return 0; } -static int aq_a2_fw_update_stats(struct aq_hw_s *self) +static void aq_a2_fill_a0_stats(struct aq_hw_s *self, + struct statistics_s *stats) { struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; - struct statistics_s stats; - - hw_atl2_shared_buffer_read_safe(self, stats, &stats); - -#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \ - stats.msm._F_ - priv->last_stats.msm._F_) + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \ + curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\ + else \ + corrupted_stats = true; \ +} while (0) if (self->aq_link_status.mbps) { AQ_SDELTA(uprc, rx_unicast_frames); @@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self) AQ_SDELTA(mbtc, tx_multicast_octets); AQ_SDELTA(bbrc, rx_broadcast_octets); AQ_SDELTA(bbtc, tx_broadcast_octets); + + if (!corrupted_stats) + *cs = curr_stats; } #undef AQ_SDELTA - 
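The udelay() to mdelay() change in hw_atl2_shared_buffer_read_block() above sits inside a seqlock-style handshake: the firmware keeps two copies of a transaction counter that only match between updates, so the reader spins until they agree. A simplified, self-contained sketch of that wait loop (all names are illustrative stubs, not the driver's API; a full implementation would also re-check the counters after copying):

#include <errno.h>
#include <stdint.h>

struct shared {
        volatile uint8_t cnt_a, cnt_b;  /* equal only between FW updates */
        uint32_t payload[4];
};

static int consistent_read(struct shared *s, uint32_t *out, int n, int max_try)
{
        int tries = 0, i;

        while (s->cnt_a != s->cnt_b) {  /* writer is mid-update */
                if (++tries > max_try)
                        return -ETIME;
                /* the driver now busy-waits ~1ms (mdelay) per retry instead
                 * of 1us, giving the firmware time to finish the update */
        }
        for (i = 0; i < n; i++)
                out[i] = s->payload[i];
        return 0;
}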
self->curr_stats.dma_pkt_rc = - hw_atl_stats_rx_dma_good_pkt_counter_get(self); - self->curr_stats.dma_pkt_tc = - hw_atl_stats_tx_dma_good_pkt_counter_get(self); - self->curr_stats.dma_oct_rc = - hw_atl_stats_rx_dma_good_octet_counter_get(self); - self->curr_stats.dma_oct_tc = - hw_atl_stats_tx_dma_good_octet_counter_get(self); - self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); + +} + +static void aq_a2_fill_b0_stats(struct aq_hw_s *self, + struct statistics_s *stats) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \ + curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \ + else \ + corrupted_stats = true; \ +} while (0) + + if (self->aq_link_status.mbps) { + AQ_SDELTA(uprc, rx_unicast_frames); + AQ_SDELTA(mprc, rx_multicast_frames); + AQ_SDELTA(bprc, rx_broadcast_frames); + AQ_SDELTA(erpr, rx_errors); + AQ_SDELTA(brc, rx_good_octets); + + AQ_SDELTA(uptc, tx_unicast_frames); + AQ_SDELTA(mptc, tx_multicast_frames); + AQ_SDELTA(bptc, tx_broadcast_frames); + AQ_SDELTA(erpt, tx_errors); + AQ_SDELTA(btc, tx_good_octets); + + if (!corrupted_stats) + *cs = curr_stats; + } +#undef AQ_SDELTA +} + +static int aq_a2_fw_update_stats(struct aq_hw_s *self) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct statistics_s stats; + struct version_s version; + int err; + + err = hw_atl2_shared_buffer_read_safe(self, version, &version); + if (err) + return err; + + err = hw_atl2_shared_buffer_read_safe(self, stats, &stats); + if (err) + return err; + + if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0) + aq_a2_fill_a0_stats(self, &stats); + else + aq_a2_fill_b0_stats(self, &stats); + + cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self); + cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self); + cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self); + cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self); + cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); memcpy(&priv->last_stats, &stats, sizeof(stats)); @@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self) hw_atl2_shared_buffer_read_safe(self, version, &version); /* A2 FW version is stored in reverse order */ - return version.mac.major << 24 | - version.mac.minor << 16 | - version.mac.build; + return version.bundle.major << 24 | + version.bundle.minor << 16 | + version.bundle.build; } int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self, diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c index e230d8d0ff73..e7a9f9863258 100644 --- a/drivers/net/ethernet/asix/ax88796c_main.c +++ b/drivers/net/ethernet/asix/ax88796c_main.c @@ -144,12 +144,13 @@ static void ax88796c_set_mac_addr(struct net_device *ndev) static void ax88796c_load_mac_addr(struct net_device *ndev) { struct ax88796c_device *ax_local = to_ax88796c_device(ndev); + u8 addr[ETH_ALEN]; u16 temp; lockdep_assert_held(&ax_local->spi_lock); /* Try the device tree first */ - if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) && + if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) && is_valid_ether_addr(ndev->dev_addr)) { if (netif_msg_probe(ax_local)) dev_info(&ax_local->spi->dev, @@ -159,18 +160,19 @@ 
static void ax88796c_load_mac_addr(struct net_device *ndev) /* Read the MAC address from AX88796C */ temp = AX_READ(&ax_local->ax_spi, P3_MACASR0); - ndev->dev_addr[5] = (u8)temp; - ndev->dev_addr[4] = (u8)(temp >> 8); + addr[5] = (u8)temp; + addr[4] = (u8)(temp >> 8); temp = AX_READ(&ax_local->ax_spi, P3_MACASR1); - ndev->dev_addr[3] = (u8)temp; - ndev->dev_addr[2] = (u8)(temp >> 8); + addr[3] = (u8)temp; + addr[2] = (u8)(temp >> 8); temp = AX_READ(&ax_local->ax_spi, P3_MACASR2); - ndev->dev_addr[1] = (u8)temp; - ndev->dev_addr[0] = (u8)(temp >> 8); + addr[1] = (u8)temp; + addr[0] = (u8)(temp >> 8); - if (is_valid_ether_addr(ndev->dev_addr)) { + if (is_valid_ether_addr(addr)) { + eth_hw_addr_set(ndev, addr); if (netif_msg_probe(ax_local)) dev_info(&ax_local->spi->dev, "MAC address read from ASIX chip\n"); diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h index 80263c3cef75..4a83c991dcbe 100644 --- a/drivers/net/ethernet/asix/ax88796c_main.h +++ b/drivers/net/ethernet/asix/ax88796c_main.h @@ -127,9 +127,9 @@ struct ax88796c_device { #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP) unsigned long flags; - #define EVENT_INTR BIT(0) - #define EVENT_TX BIT(1) - #define EVENT_SET_MULTI BIT(2) + #define EVENT_INTR 0 + #define EVENT_TX 1 + #define EVENT_SET_MULTI 2 }; diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c index 94df4f96d2be..0710e716d682 100644 --- a/drivers/net/ethernet/asix/ax88796c_spi.c +++ b/drivers/net/ethernet/asix/ax88796c_spi.c @@ -34,7 +34,7 @@ int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status) /* OP */ ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS; - ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)&status, 3); + ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3); if (ret) dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret); else diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 88d2ab748399..ec167af0e3b2 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -766,7 +766,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag) unsigned long timestamp; u32 rx_sm, tx_sm, rx_fd; - timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start; + timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start); if (likely(time_before(jiffies, timestamp + HZ / 10))) return false; @@ -1024,83 +1024,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode, ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); } -static void ag71xx_mac_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - switch (state->interface) { - case PHY_INTERFACE_MODE_NA: - break; - case PHY_INTERFACE_MODE_MII: - if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) || - ag71xx_is(ag, AR9340) || - ag71xx_is(ag, QCA9530) || - (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) - break; - goto unsupported; - case PHY_INTERFACE_MODE_GMII: - if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) || - (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) || - (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1)) - break; - goto unsupported; - case PHY_INTERFACE_MODE_SGMII: - if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0) - break; - goto unsupported; - case PHY_INTERFACE_MODE_RMII: - if (ag71xx_is(ag, 
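The ax88796c conversion above follows the tree-wide rule that netdev->dev_addr is const and may only be written through helpers: the MAC is assembled in a local buffer, validated, and committed with eth_hw_addr_set(). A minimal sketch of the pattern (example_load_mac and its register arguments are illustrative):

#include <linux/etherdevice.h>

static void example_load_mac(struct net_device *ndev, u16 r0, u16 r1, u16 r2)
{
        u8 addr[ETH_ALEN];

        /* chip returns the address as three little-endian 16-bit words */
        addr[5] = (u8)r0;
        addr[4] = (u8)(r0 >> 8);
        addr[3] = (u8)r1;
        addr[2] = (u8)(r1 >> 8);
        addr[1] = (u8)r2;
        addr[0] = (u8)(r2 >> 8);

        if (is_valid_ether_addr(addr))
                eth_hw_addr_set(ndev, addr);    /* copies into dev->dev_addr */
        else
                eth_hw_addr_random(ndev);       /* illustrative fallback */
}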
AR9340) && ag->mac_idx == 0) - break; - goto unsupported; - case PHY_INTERFACE_MODE_RGMII: - if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) || - (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) - break; - goto unsupported; - default: - goto unsupported; - } - - phylink_set(mask, MII); - - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - phylink_set(mask, Autoneg); - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - - if (state->interface == PHY_INTERFACE_MODE_NA || - state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_RGMII || - state->interface == PHY_INTERFACE_MODE_GMII) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); - - return; -unsupported: - linkmode_zero(supported); -} - -static void ag71xx_mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state) -{ - state->link = 0; -} - -static void ag71xx_mac_an_restart(struct phylink_config *config) -{ - /* Not Supported */ -} - static void ag71xx_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -1163,9 +1086,7 @@ static void ag71xx_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops ag71xx_phylink_mac_ops = { - .validate = ag71xx_mac_validate, - .mac_pcs_get_state = ag71xx_mac_pcs_get_state, - .mac_an_restart = ag71xx_mac_an_restart, + .validate = phylink_generic_validate, .mac_config = ag71xx_mac_config, .mac_link_down = ag71xx_mac_link_down, .mac_link_up = ag71xx_mac_link_up, @@ -1177,6 +1098,34 @@ static int ag71xx_phylink_setup(struct ag71xx *ag) ag->phylink_config.dev = &ag->ndev->dev; ag->phylink_config.type = PHYLINK_NETDEV; + ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD; + + if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) || + ag71xx_is(ag, AR9340) || + ag71xx_is(ag, QCA9530) || + (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) + __set_bit(PHY_INTERFACE_MODE_MII, + ag->phylink_config.supported_interfaces); + + if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) || + (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) || + (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1)) + __set_bit(PHY_INTERFACE_MODE_GMII, + ag->phylink_config.supported_interfaces); + + if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0) + __set_bit(PHY_INTERFACE_MODE_SGMII, + ag->phylink_config.supported_interfaces); + + if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0) + __set_bit(PHY_INTERFACE_MODE_RMII, + ag->phylink_config.supported_interfaces); + + if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) || + (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) + __set_bit(PHY_INTERFACE_MODE_RGMII, + ag->phylink_config.supported_interfaces); phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode, ag->phy_if_mode, &ag71xx_phylink_mac_ops); @@ -1913,15 +1862,12 @@ static int ag71xx_probe(struct platform_device *pdev) ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); if (IS_ERR(ag->mac_reset)) { netif_err(ag, probe, ndev, "missing mac reset\n"); - err = PTR_ERR(ag->mac_reset); - goto err_free; + return PTR_ERR(ag->mac_reset); } ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!ag->mac_base) { - err = -ENOMEM; - goto err_free; - } + if (!ag->mac_base) + return -ENOMEM; ndev->irq = platform_get_irq(pdev, 0); err = 
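The ag71xx phylink rework above trades a hand-written .validate callback for declarative capabilities: the driver fills mac_capabilities plus the supported_interfaces bitmap once at setup, and phylink_generic_validate() derives the allowed link modes from them. A condensed sketch (the has_sgmii flag stands in for the per-SoC/mac_idx checks):

#include <linux/phylink.h>

static void example_phylink_caps(struct phylink_config *cfg, bool has_sgmii)
{
        cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                MAC_10 | MAC_100 | MAC_1000FD;

        /* only the interface modes this MAC instance can drive */
        __set_bit(PHY_INTERFACE_MODE_MII, cfg->supported_interfaces);
        if (has_sgmii)
                __set_bit(PHY_INTERFACE_MODE_SGMII,
                          cfg->supported_interfaces);
}

With that in place the ops table simply points .validate at phylink_generic_validate, as the hunk does.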
devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt, @@ -1929,7 +1875,7 @@ static int ag71xx_probe(struct platform_device *pdev) if (err) { netif_err(ag, probe, ndev, "unable to request IRQ %d\n", ndev->irq); - goto err_free; + return err; } ndev->netdev_ops = &ag71xx_netdev_ops; @@ -1957,10 +1903,8 @@ static int ag71xx_probe(struct platform_device *pdev) ag->stop_desc = dmam_alloc_coherent(&pdev->dev, sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL); - if (!ag->stop_desc) { - err = -ENOMEM; - goto err_free; - } + if (!ag->stop_desc) + return -ENOMEM; ag->stop_desc->data = 0; ag->stop_desc->ctrl = 0; @@ -1975,7 +1919,7 @@ static int ag71xx_probe(struct platform_device *pdev) err = of_get_phy_mode(np, &ag->phy_if_mode); if (err) { netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); - goto err_free; + return err; } netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); @@ -1983,7 +1927,7 @@ static int ag71xx_probe(struct platform_device *pdev) err = clk_prepare_enable(ag->clk_eth); if (err) { netif_err(ag, probe, ndev, "Failed to enable eth clk.\n"); - goto err_free; + return err; } ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0); @@ -2019,8 +1963,6 @@ err_mdio_remove: ag71xx_mdio_remove(ag); err_put_clk: clk_disable_unprepare(ag->clk_eth); -err_free: - free_netdev(ndev); return err; } diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index b4c9e805e981..6a969969d221 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3438,7 +3438,9 @@ static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs, } static void atl1_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *txdr = &adapter->tpd_ring; @@ -3451,7 +3453,9 @@ static void atl1_get_ringparam(struct net_device *netdev, } static int atl1_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 969591bbc066..e5857e88c207 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1961,7 +1961,9 @@ static int b44_set_link_ksettings(struct net_device *dev, } static void b44_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct b44 *bp = netdev_priv(dev); @@ -1972,7 +1974,9 @@ static void b44_get_ringparam(struct net_device *dev, } static int b44_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct b44 *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 7cc5213c575a..4a2622b05ee1 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -635,7 +635,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct 
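The atl1 and b44 hunks above are part of a tree-wide signature change: get_ringparam/set_ringparam now receive a struct kernel_ethtool_ringparam and a netlink extack alongside the legacy struct. A sketch of the new shape (my_priv and its fields are illustrative):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct my_priv { u32 rx_ring_size, tx_ring_size; };

static void example_get_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ering,
                                  struct kernel_ethtool_ringparam *kernel_ering,
                                  struct netlink_ext_ack *extack)
{
        struct my_priv *priv = netdev_priv(dev);

        /* legacy fields still carry the classic ring sizes */
        ering->rx_pending = priv->rx_ring_size;
        ering->tx_pending = priv->tx_ring_size;
        /* kernel_ering/extack serve extended params and netlink error
         * reporting; drivers that need neither just ignore them */
}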
*napi, int weight) struct bcm4908_enet_dma_ring_bd *buf_desc; struct bcm4908_enet_dma_ring_slot *slot; struct device *dev = enet->dev; - unsigned int bytes = 0; int handled = 0; while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) { @@ -646,7 +645,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight) dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE); dev_kfree_skb(slot->skb); - bytes += slot->len; if (++tx_ring->read_idx == tx_ring->length) tx_ring->read_idx = 0; @@ -708,7 +706,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev) enet->irq_tx = platform_get_irq_byname(pdev, "tx"); - dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (err) + return err; err = bcm4908_enet_dma_alloc(enet); if (err) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index a568994a03a6..b04e423c446a 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1497,8 +1497,11 @@ static int bcm_enet_set_link_ksettings(struct net_device *dev, } } -static void bcm_enet_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) +static void +bcm_enet_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bcm_enet_priv *priv; @@ -1512,7 +1515,9 @@ static void bcm_enet_get_ringparam(struct net_device *dev, } static int bcm_enet_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bcm_enet_priv *priv; int was_running; @@ -2579,8 +2584,11 @@ static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, } } -static void bcm_enetsw_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) +static void +bcm_enetsw_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bcm_enet_priv *priv; @@ -2595,8 +2603,11 @@ static void bcm_enetsw_get_ringparam(struct net_device *dev, ering->tx_pending = priv->tx_ring_size; } -static int bcm_enetsw_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) +static int +bcm_enetsw_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bcm_enet_priv *priv; int was_running; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 40933bf5a710..60dde29974bf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1309,11 +1309,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, struct bcm_sysport_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; struct bcm_sysport_tx_ring *ring; + unsigned long flags, desc_flags; struct bcm_sysport_cb *cb; struct netdev_queue *txq; u32 len_status, addr_lo; unsigned int skb_len; - unsigned long flags; dma_addr_t mapping; u16 queue; int ret; @@ -1373,8 +1373,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, ring->desc_count--; /* Ports are latched, so write upper address first */ + spin_lock_irqsave(&priv->desc_lock, desc_flags); tdma_writel(priv, len_status, 
TDMA_WRITE_PORT_HI(ring->index)); tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index)); + spin_unlock_irqrestore(&priv->desc_lock, desc_flags); /* Check ring space and update SW control flow */ if (ring->desc_count == 0) @@ -2013,6 +2015,7 @@ static int bcm_sysport_open(struct net_device *dev) } /* Initialize both hardware and software ring */ + spin_lock_init(&priv->desc_lock); for (i = 0; i < dev->num_tx_queues; i++) { ret = bcm_sysport_init_tx_ring(priv, i); if (ret) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 984f76e74b43..16b73bb9acc7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -711,6 +711,7 @@ struct bcm_sysport_priv { int wol_irq; /* Transmit rings */ + spinlock_t desc_lock; struct bcm_sysport_tx_ring *tx_rings; /* Receive queue */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index babc955ba64e..e20aafeb4ca9 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -7318,7 +7318,9 @@ static int bnx2_set_coalesce(struct net_device *dev, } static void -bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -7389,7 +7391,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq) } static int -bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); int rc; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 2b06d78baa08..a19dd6797070 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1850,6 +1850,14 @@ struct bnx2x { /* Vxlan/Geneve related information */ u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; + +#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0) + u32 fw_cap; + + u32 fw_major; + u32 fw_minor; + u32 fw_rev; + u32 fw_eng; }; /* Tx queues may be less or equal to Rx queues */ @@ -2525,5 +2533,6 @@ void bnx2x_register_phc(struct bnx2x *bp); * Meant for implicit re-load flows. 
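The new desc_lock above closes a race on SYSTEMPORT's latched descriptor port: the HI write arms a latch that the following LO write commits, so two TX queues writing concurrently could pair the wrong halves. A sketch of the locking pattern (struct ex_priv and the register names are illustrative):

#include <linux/io.h>
#include <linux/spinlock.h>

struct ex_priv {
        spinlock_t desc_lock;           /* serializes the HI/LO pair */
        void __iomem *port_hi, *port_lo;
};

static void example_push_desc(struct ex_priv *p, u32 hi, u32 lo)
{
        unsigned long flags;

        spin_lock_irqsave(&p->desc_lock, flags);
        writel(hi, p->port_hi);         /* latched: must directly precede LO */
        writel(lo, p->port_lo);
        spin_unlock_irqrestore(&p->desc_lock, flags);
}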
*/ int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); - +int bnx2x_init_firmware(struct bnx2x *bp); +void bnx2x_release_firmware(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e8e8c2d593c5..8d36ebbf08e1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -25,6 +25,7 @@ #include <linux/ip.h> #include <linux/crash_dump.h> #include <net/tcp.h> +#include <net/gro.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> #include <linux/prefetch.h> @@ -2364,10 +2365,8 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { /* build my FW version dword */ - u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) + - (BCM_5710_FW_MINOR_VERSION << 8) + - (BCM_5710_FW_REVISION_VERSION << 16) + - (BCM_5710_FW_ENGINEERING_VERSION << 24); + u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) + + (bp->fw_rev << 16) + (bp->fw_eng << 24); /* read loaded FW from chip */ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 472a3a478038..0e319ac7799f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1914,7 +1914,9 @@ static int bnx2x_set_coalesce(struct net_device *dev, } static void bnx2x_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnx2x *bp = netdev_priv(dev); @@ -1938,7 +1940,9 @@ static void bnx2x_get_ringparam(struct net_device *dev, } static int bnx2x_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 3f8435208bf4..a84d015da5df 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -241,6 +241,8 @@ IRO[221].m2)) #define XSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[48].base + ((funcId) * IRO[48].m1)) +#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \ + (IRO[386].base + ((fid) * IRO[386].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 /* eth hsi version */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 622fadc50316..611efee75834 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3024,7 +3024,8 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 13 -#define BCM_5710_FW_REVISION_VERSION 15 +#define BCM_5710_FW_REVISION_VERSION 21 +#define BCM_5710_FW_REVISION_VERSION_V15 15 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 1835d2e451c0..fc7fce642666 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -635,11 +635,13 @@ 
static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, { int i, rc; struct bnx2x_ilt *ilt = BP_ILT(bp); - struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + struct ilt_client_info *ilt_cli; if (!ilt || !ilt->lines) return -1; + ilt_cli = &ilt->clients[cli_num]; + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index aec666e97683..774c1f1a57c3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -74,9 +74,19 @@ __stringify(BCM_5710_FW_MINOR_VERSION) "." \ __stringify(BCM_5710_FW_REVISION_VERSION) "." \ __stringify(BCM_5710_FW_ENGINEERING_VERSION) + +#define FW_FILE_VERSION_V15 \ + __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ + __stringify(BCM_5710_FW_MINOR_VERSION) "." \ + __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \ + __stringify(BCM_5710_FW_ENGINEERING_VERSION) + #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" +#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw" +#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw" +#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw" /* Time in jiffies before concluding the transmitter is hung */ #define TX_TIMEOUT (5*HZ) @@ -747,9 +757,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) CHIP_IS_E1(bp) ? "everest1" : CHIP_IS_E1H(bp) ? "everest1h" : CHIP_IS_E2(bp) ? "everest2" : "everest3", - BCM_5710_FW_MAJOR_VERSION, - BCM_5710_FW_MINOR_VERSION, - BCM_5710_FW_REVISION_VERSION); + bp->fw_major, bp->fw_minor, bp->fw_rev); return rc; } @@ -12308,6 +12316,15 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_read_fwinfo(bp); + if (IS_PF(bp)) { + rc = bnx2x_init_firmware(bp); + + if (rc) { + bnx2x_free_mem_bp(bp); + return rc; + } + } + func = BP_FUNC(bp); /* need to reset chip if undi was active */ @@ -12320,6 +12337,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) rc = bnx2x_prev_unload(bp); if (rc) { + bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); return rc; } @@ -13026,19 +13044,6 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_features_check = bnx2x_features_check, }; -static int bnx2x_set_coherency_mask(struct bnx2x *bp) -{ - struct device *dev = &bp->pdev->dev; - - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { - dev_err(dev, "System does not support DMA, aborting\n"); - return -EIO; - } - - return 0; -} - static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) { if (bp->flags & AER_ENABLED) { @@ -13116,9 +13121,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, goto err_out_release; } - rc = bnx2x_set_coherency_mask(bp); - if (rc) + rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n"); goto err_out_release; + } dev->mem_start = pci_resource_start(pdev, 0); dev->base_addr = dev->mem_start; @@ -13317,16 +13324,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp) /* Check FW version */ offset = be32_to_cpu(fw_hdr->fw_version.offset); fw_ver = firmware->data + offset; - if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || - (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || - (fw_ver[2] != 
BCM_5710_FW_REVISION_VERSION) || - (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { + if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor || + fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) { BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", - fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], - BCM_5710_FW_MAJOR_VERSION, - BCM_5710_FW_MINOR_VERSION, - BCM_5710_FW_REVISION_VERSION, - BCM_5710_FW_ENGINEERING_VERSION); + fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], + bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng); return -EINVAL; } @@ -13404,34 +13406,51 @@ do { \ (u8 *)bp->arr, len); \ } while (0) -static int bnx2x_init_firmware(struct bnx2x *bp) +int bnx2x_init_firmware(struct bnx2x *bp) { - const char *fw_file_name; + const char *fw_file_name, *fw_file_name_v15; struct bnx2x_fw_file_hdr *fw_hdr; int rc; if (bp->firmware) return 0; - if (CHIP_IS_E1(bp)) + if (CHIP_IS_E1(bp)) { fw_file_name = FW_FILE_NAME_E1; - else if (CHIP_IS_E1H(bp)) + fw_file_name_v15 = FW_FILE_NAME_E1_V15; + } else if (CHIP_IS_E1H(bp)) { fw_file_name = FW_FILE_NAME_E1H; - else if (!CHIP_IS_E1x(bp)) + fw_file_name_v15 = FW_FILE_NAME_E1H_V15; + } else if (!CHIP_IS_E1x(bp)) { fw_file_name = FW_FILE_NAME_E2; - else { + fw_file_name_v15 = FW_FILE_NAME_E2_V15; + } else { BNX2X_ERR("Unsupported chip revision\n"); return -EINVAL; } + BNX2X_DEV_INFO("Loading %s\n", fw_file_name); rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); if (rc) { - BNX2X_ERR("Can't load firmware file %s\n", - fw_file_name); - goto request_firmware_exit; + BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15); + + /* try to load prev version */ + rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev); + + if (rc) + goto request_firmware_exit; + + bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15; + } else { + bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI; + bp->fw_rev = BCM_5710_FW_REVISION_VERSION; } + bp->fw_major = BCM_5710_FW_MAJOR_VERSION; + bp->fw_minor = BCM_5710_FW_MINOR_VERSION; + bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION; + rc = bnx2x_check_firmware(bp); if (rc) { BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); @@ -13487,7 +13506,7 @@ request_firmware_exit: return rc; } -static void bnx2x_release_firmware(struct bnx2x *bp) +void bnx2x_release_firmware(struct bnx2x *bp) { kfree(bp->init_ops_offsets); kfree(bp->init_ops); @@ -14004,6 +14023,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, return 0; init_one_freemem: + bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); init_one_exit: @@ -15356,11 +15376,6 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", config.tx_type, config.rx_filter); - if (config.flags) { - BNX2X_ERR("config.flags is reserved for future use\n"); - return -EINVAL; - } - bp->hwtstamp_ioctl_called = true; bp->tx_type = config.tx_type; bp->rx_filter = config.rx_filter; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 74a8931ce1d1..11d15cd03600 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) { + u16 abs_fid; + + abs_fid = FW_VF_HANDLE(abs_vfid); + /* set the VF-PF association in the FW */ - storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); - 
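bnx2x_init_firmware() above now degrades gracefully across firmware releases: it asks for the 7.13.21 blob first, falls back to 7.13.15, and records which one loaded so capability bits like FW_CAP_INVALIDATE_VF_FP_HSI are only set for the newer interface. A sketch of the fallback pattern (names other than request_firmware are illustrative):

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_fw(struct device *dev, const struct firmware **fw,
                           const char *fw_new, const char *fw_old,
                           bool *has_new_caps)
{
        int rc = request_firmware(fw, fw_new, dev);

        *has_new_caps = !rc;
        if (rc)         /* new blob absent: try the older release */
                rc = request_firmware(fw, fw_old, dev);
        return rc;      /* caller gates features on *has_new_caps */
}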
storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); + storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp)); + storm_memset_func_en(bp, abs_fid, 1); + + /* Invalidate fp_hsi version for vfs */ + if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI) + REG_WR8(bp, BAR_XSTRORM_INTMEM + + XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0); /* clear vf errors*/ bnx2x_vf_semi_clear_err(bp, abs_vfid); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 8c2cf5519787..2dac704dc346 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -586,7 +586,7 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } -static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, +static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set) {return 0; } static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *params) {return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 0b193edb73b8..2bb133ae61c3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -849,7 +849,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) memcpy(old, new, sizeof(struct nig_stats)); - memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), + BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1])); + memcpy(&(estats->shared), &(pstats->mac_stx[1]), sizeof(struct mac_stx)); estats->brb_drop_hi = pstats->brb_drop_hi; estats->brb_drop_lo = pstats->brb_drop_lo; @@ -1634,9 +1635,9 @@ void bnx2x_stats_init(struct bnx2x *bp) REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); if (!CHIP_IS_E3(bp)) { REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, - &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); + &(bp->port.old_nig_stats.egress_mac_pkt0), 2); REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, - &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); + &(bp->port.old_nig_stats.egress_mac_pkt1), 2); } /* Prepare statistics ramrod data */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index d55e63692cf3..ae93c078707b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -36,10 +36,14 @@ struct nig_stats { u32 pbf_octets; u32 pbf_packet; u32 safc_inp; - u32 egress_mac_pkt0_lo; - u32 egress_mac_pkt0_hi; - u32 egress_mac_pkt1_lo; - u32 egress_mac_pkt1_hi; + struct_group(egress_mac_pkt0, + u32 egress_mac_pkt0_lo; + u32 egress_mac_pkt0_hi; + ); + struct_group(egress_mac_pkt1, + u32 egress_mac_pkt1_lo; + u32 egress_mac_pkt1_hi; + ); }; enum bnx2x_stats_event { @@ -83,6 +87,7 @@ struct bnx2x_eth_stats { u32 no_buff_discard_hi; u32 no_buff_discard_lo; + struct_group(shared, u32 rx_stat_ifhcinbadoctets_hi; u32 rx_stat_ifhcinbadoctets_lo; u32 tx_stat_ifhcoutbadoctets_hi; @@ -159,6 +164,7 @@ struct bnx2x_eth_stats { u32 tx_stat_dot3statsinternalmactransmiterrors_lo; u32 tx_stat_bmac_ufl_hi; u32 tx_stat_bmac_ufl_lo; + ); u32 pause_frames_received_hi; u32 pause_frames_received_lo; diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c04ea83188e2..4f94136a011a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -37,6 +37,7 @@ #include <linux/if_bridge.h> #include <linux/rtc.h> #include <linux/bpf.h> +#include <net/gro.h> #include <net/ip.h> #include <net/tcp.h> #include <net/udp.h> @@ -740,13 +741,16 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, return page; } -static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, +static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, gfp_t gfp) { u8 *data; struct pci_dev *pdev = bp->pdev; - data = kmalloc(bp->rx_buf_size, gfp); + if (gfp == GFP_ATOMIC) + data = napi_alloc_frag(bp->rx_buf_size); + else + data = netdev_alloc_frag(bp->rx_buf_size); if (!data) return NULL; @@ -755,7 +759,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(&pdev->dev, *mapping)) { - kfree(data); + skb_free_frag(data); data = NULL; } return data; @@ -778,7 +782,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, rx_buf->data = page; rx_buf->data_ptr = page_address(page) + bp->rx_offset; } else { - u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); + u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); if (!data) return -ENOMEM; @@ -1020,11 +1024,11 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return NULL; } - skb = build_skb(data, 0); + skb = build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (!skb) { - kfree(data); + skb_free_frag(data); return NULL; } @@ -1612,7 +1616,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, u8 *new_data; dma_addr_t new_mapping; - new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); + new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats.rx.rx_oom_discards += 1; @@ -1623,13 +1627,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info->data_ptr = new_data + bp->rx_offset; tpa_info->mapping = new_mapping; - skb = build_skb(data, 0); + skb = build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, mapping, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (!skb) { - kfree(data); + skb_free_frag(data); bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; @@ -2043,13 +2047,22 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) { - switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) { + u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); + + switch (err_type) { case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
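The bnxt allocation hunks above replace kmalloc'ed RX buffers with page fragments: napi_alloc_frag() or netdev_alloc_frag() depending on context, build_skb() told the real buffer size, and skb_free_frag() on the error and teardown paths. A condensed sketch (buf_size and the helper name are illustrative):

#include <linux/skbuff.h>

static struct sk_buff *example_rx_to_skb(unsigned int buf_size, bool in_napi)
{
        struct sk_buff *skb;
        void *data;

        data = in_napi ? napi_alloc_frag(buf_size)      /* NAPI poll path */
                       : netdev_alloc_frag(buf_size);   /* open/refill path */
        if (!data)
                return NULL;

        /* non-zero frag_size marks the head as a page frag, not kmalloc */
        skb = build_skb(data, buf_size);
        if (!skb)
                skb_free_frag(data);    /* must match the frag allocator */
        return skb;
}

Note that buf_size must leave room for skb_shared_info when handed to build_skb(); the driver sizes rx_buf_size accordingly.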
Please fix the signal and reconfigure the pin\n", BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: + netdev_warn(bp->dev, "Pause Storm detected!\n"); + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: + netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); + break; default: - netdev_err(bp->dev, "FW reported unknown error type\n"); + netdev_err(bp->dev, "FW reported unknown error type %u\n", + err_type); break; } } @@ -2073,6 +2086,9 @@ static int bnxt_async_event_process(struct bnxt *bp, u32 data1 = le32_to_cpu(cmpl->event_data1); u32 data2 = le32_to_cpu(cmpl->event_data2); + netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", + event_id, data1, data2); + /* TODO CHIMP_FW: Define event id's for link change, error etc */ switch (event_id) { case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { @@ -2419,7 +2435,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (event & BNXT_REDIRECT_EVENT) - xdp_do_flush_map(); + xdp_do_flush(); if (event & BNXT_TX_EVENT) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; @@ -2622,6 +2638,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) { struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_cp_ring_info *cpr_rx; u32 raw_cons = cpr->cp_raw_cons; struct bnxt *bp = bnapi->bp; struct nqe_cn *nqcmp; @@ -2649,7 +2666,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) if (napi_complete_done(napi, work_done)) BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); - return work_done; + goto poll_done; } /* The valid test of the entry must be done first before @@ -2675,6 +2692,17 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) cpr->cp_raw_cons = raw_cons; BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); } +poll_done: + cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL]; + if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) { + struct dim_sample dim_sample = {}; + + dim_update_sample(cpr->event_ctr, + cpr_rx->rx_packets, + cpr_rx->rx_bytes, + &dim_sample); + net_dim(&cpr->dim, dim_sample); + } return work_done; } @@ -2774,7 +2802,7 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) tpa_info->data = NULL; - kfree(data); + skb_free_frag(data); } skip_rx_tpa_free: @@ -2800,7 +2828,7 @@ skip_rx_tpa_free: dma_unmap_single_attrs(&pdev->dev, mapping, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - kfree(data); + skb_free_frag(data); } } @@ -3504,7 +3532,7 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) u8 *data; for (i = 0; i < bp->max_tpa; i++) { - data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL); + data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); if (!data) return -ENOMEM; @@ -6474,8 +6502,8 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) { struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + u16 val, tmr, max, flags = hw_coal->flags; u32 cmpl_params = coal_cap->cmpl_params; - u16 val, tmr, max, flags = 0; max = hw_coal->bufs_per_record * 128; if (hw_coal->budget) @@ -6518,8 +6546,6 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); } - if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) - flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; if ((cmpl_params & 
RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; @@ -7668,19 +7694,6 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) BNXT_FW_HEALTH_WIN_MAP_OFF); } -bool bnxt_is_fw_healthy(struct bnxt *bp) -{ - if (bp->fw_health && bp->fw_health->status_reliable) { - u32 fw_status; - - fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); - if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status)) - return false; - } - - return true; -} - static void bnxt_inv_fw_health_reg(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; @@ -8008,6 +8021,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); if (!bp->hwrm_cmd_timeout) bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; + if (!bp->hwrm_cmd_max_timeout) + bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; + else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) + netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", + bp->hwrm_cmd_max_timeout / 1000); if (resp->hwrm_intf_maj_8b >= 1) { bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); @@ -8611,7 +8630,10 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) /* Filter for default vnic 0 */ rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); if (rc) { - netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); + if (BNXT_VF(bp) && rc == -ENODEV) + netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); + else + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); goto err_out; } vnic->uc_filter_count = 1; @@ -9404,6 +9426,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) rc = hwrm_req_send(bp, req); if (rc) { hwrm_req_drop(bp, req); + if (BNXT_VF(bp) && rc == -ENODEV) { + netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); + rc = 0; + } return rc; } @@ -10802,12 +10828,21 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); if (rc) { - netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", - rc); + if (BNXT_VF(bp) && rc == -ENODEV) { + if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) + netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); + else + netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); + rc = 0; + } else { + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); + } vnic->uc_filter_count = i; return rc; } } + if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) + netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); skip_uc: if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && @@ -11372,6 +11407,11 @@ static void bnxt_timer(struct timer_list *t) } } + if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) { + set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } + if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && netif_carrier_ok(dev)) { set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); @@ -11875,7 +11915,13 @@ static void bnxt_cleanup_pci(struct bnxt *bp) static void bnxt_init_dflt_coal(struct bnxt *bp) { + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; struct bnxt_coal *coal; + u16 
flags = 0; + + if (coal_cap->cmpl_params & + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; /* Tick values in micro seconds. * 1 coal_buf x bufs_per_record = 1 completion record. @@ -11888,6 +11934,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) coal->idle_thresh = 50; coal->bufs_per_record = 2; coal->budget = 64; /* NAPI budget */ + coal->flags = flags; coal = &bp->tx_coal; coal->coal_ticks = 28; @@ -11895,6 +11942,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) coal->coal_ticks_irq = 2; coal->coal_bufs_irq = 2; coal->bufs_per_record = 1; + coal->flags = flags; bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } @@ -12381,8 +12429,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; - bnxt_init_dflt_coal(bp); - timer_setup(&bp->timer, bnxt_timer, 0); bp->current_interval = BNXT_TIMER_INTERVAL; @@ -13072,7 +13118,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) bp->tx_nr_rings = bp->tx_nr_rings_per_tc; rc = __bnxt_reserve_rings(bp); - if (rc) + if (rc && rc != -ENODEV) netdev_warn(bp->dev, "Unable to reserve tx rings\n"); bp->tx_nr_rings_per_tc = bp->tx_nr_rings; if (sh) @@ -13081,7 +13127,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) /* Rings may have been trimmed, re-reserve the trimmed rings. */ if (bnxt_need_reserve_rings(bp)) { rc = __bnxt_reserve_rings(bp); - if (rc) + if (rc && rc != -ENODEV) netdev_warn(bp->dev, "2nd rings reservation failed.\n"); bp->tx_nr_rings_per_tc = bp->tx_nr_rings; } @@ -13107,7 +13153,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) bnxt_clear_int_mode(bp); rc = bnxt_set_dflt_rings(bp, true); if (rc) { - netdev_err(bp->dev, "Not enough rings available.\n"); + if (BNXT_VF(bp) && rc == -ENODEV) + netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); + else + netdev_err(bp->dev, "Not enough rings available.\n"); goto init_dflt_ring_err; } rc = bnxt_init_int_mode(bp); @@ -13395,13 +13444,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_ring_params(bp); rc = bnxt_set_dflt_rings(bp, true); if (rc) { - netdev_err(bp->dev, "Not enough rings available.\n"); - rc = -ENOMEM; + if (BNXT_VF(bp) && rc == -ENODEV) { + netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); + } else { + netdev_err(bp->dev, "Not enough rings available.\n"); + rc = -ENOMEM; + } goto init_err_pci_clean; } bnxt_fw_init_one_p3(bp); + bnxt_init_dflt_coal(bp); + if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d0d5da9b78f8..440dfeb4948b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -847,6 +847,7 @@ struct bnxt_coal { u16 idle_thresh; u8 bufs_per_record; u8 budget; + u16 flags; }; struct bnxt_tpa_info { @@ -1915,6 +1916,7 @@ struct bnxt { #define BNXT_STATE_DRV_REGISTERED 7 #define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8 #define BNXT_STATE_NAPI_DISABLED 9 +#define BNXT_STATE_L2_FILTER_RETRY 10 #define BNXT_STATE_FW_ACTIVATE 11 #define BNXT_STATE_RECOVER 12 #define BNXT_STATE_FW_NON_FATAL_COND 13 @@ -1985,7 +1987,8 @@ struct bnxt { u16 hwrm_max_req_len; u16 hwrm_max_ext_req_len; - int hwrm_cmd_timeout; + unsigned int hwrm_cmd_timeout; + unsigned int hwrm_cmd_max_timeout; struct mutex 
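The new hwrm_cmd_max_timeout above comes from the VER_GET response: the value is converted to milliseconds, defaulted when the device reports nothing, and merely warned about (not clamped) when it exceeds what the hung-task watchdog tolerates. A sketch, assuming the field is in seconds as the *1000 conversion and the warning text suggest, with an assumed 40000 ms cap standing in for HWRM_CMD_MAX_TIMEOUT:

#include <stdint.h>
#include <stdio.h>

#define CMD_MAX_TIMEOUT_MS 40000u       /* assumed value of the cap */

static unsigned int example_max_timeout_ms(uint16_t dev_max_timeout_s)
{
        unsigned int t = dev_max_timeout_s * 1000u;     /* s -> ms */

        if (!t)
                return CMD_MAX_TIMEOUT_MS;      /* device reported nothing */
        if (t > CMD_MAX_TIMEOUT_MS)
                fprintf(stderr, "max timeout %u ms may trigger hung task watchdog\n", t);
        return t;       /* honoured even when above the soft cap */
}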
hwrm_cmd_lock; /* serialize hwrm messages */ struct hwrm_ver_get_output ver_resp; #define FW_VER_STR_LEN 32 @@ -2258,6 +2261,16 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } +/* Must hold rtnl_lock */ +static inline bool bnxt_sriov_cfg(struct bnxt *bp) +{ +#if defined(CONFIG_BNXT_SRIOV) + return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg); +#else + return false; +#endif +} + extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -2292,7 +2305,6 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset); int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); -bool bnxt_is_fw_healthy(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index d3cb2f21946d..c06789882036 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -32,7 +32,7 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, return -ENOMEM; } - hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); + hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout); cmn_resp = hwrm_req_hold(bp, msg); resp = cmn_resp; @@ -125,7 +125,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, if (rc) return rc; - hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout); req->component_id = cpu_to_le16(component_id); req->segment_id = cpu_to_le16(segment_id); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 5c464ea73576..4da31b1b84f9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <linux/netdevice.h> +#include <linux/vmalloc.h> #include <net/devlink.h> #include "bnxt_hsi.h" #include "bnxt.h" @@ -360,7 +361,7 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, NL_SET_ERR_MSG_MOD(extack, "Live patch already applied"); break; default: - netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err); + netdev_err(bp->dev, "Unexpected live patch error: %d\n", err); NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch"); break; } @@ -441,12 +442,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { - if (BNXT_PF(bp) && bp->pf.active_vfs) { + rtnl_lock(); + if (bnxt_sriov_cfg(bp)) { NL_SET_ERR_MSG_MOD(extack, - "reload is unsupported when VFs are allocated"); + "reload is unsupported while VFs are allocated or being configured"); + rtnl_unlock(); return -EOPNOTSUPP; } - rtnl_lock(); if (bp->dev->reg_state == NETREG_UNREGISTERED) { rtnl_unlock(); return -ENODEV; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8188d55722e4..003330e8cd58 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -31,9 +31,6 @@ #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ #include 
"bnxt_coredump.h" -#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) -#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) -#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) static u32 bnxt_get_msglevel(struct net_device *dev) { @@ -68,6 +65,9 @@ static int bnxt_get_coalesce(struct net_device *dev, coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; + if (hw_coal->flags & + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) + kernel_coal->use_cqe_mode_rx = true; hw_coal = &bp->tx_coal; mult = hw_coal->bufs_per_record; @@ -75,6 +75,9 @@ static int bnxt_get_coalesce(struct net_device *dev, coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; + if (hw_coal->flags & + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) + kernel_coal->use_cqe_mode_tx = true; coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; @@ -101,12 +104,22 @@ static int bnxt_set_coalesce(struct net_device *dev, } } + if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) && + !(bp->coal_cap.cmpl_params & + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)) + return -EOPNOTSUPP; + hw_coal = &bp->rx_coal; mult = hw_coal->bufs_per_record; hw_coal->coal_ticks = coal->rx_coalesce_usecs; hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; + hw_coal->flags &= + ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + if (kernel_coal->use_cqe_mode_rx) + hw_coal->flags |= + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; hw_coal = &bp->tx_coal; mult = hw_coal->bufs_per_record; @@ -114,6 +127,11 @@ static int bnxt_set_coalesce(struct net_device *dev, hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; + hw_coal->flags &= + ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + if (kernel_coal->use_cqe_mode_tx) + hw_coal->flags |= + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { u32 stats_ticks = coal->stats_block_coalesce_usecs; @@ -775,7 +793,9 @@ skip_tpa_stats: } static void bnxt_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); @@ -794,7 +814,9 @@ static void bnxt_get_ringparam(struct net_device *dev, } static int bnxt_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); @@ -2169,7 +2191,7 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, req->host_src_addr = cpu_to_le64(dma_handle); } - hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT); + hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout); req->dir_type = cpu_to_le16(dir_type); req->dir_ordinal = cpu_to_le16(dir_ordinal); req->dir_ext = cpu_to_le16(dir_ext); @@ -2515,8 +2537,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware return rc; } - 
hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT); - hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT); + hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout); + hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout); hwrm_req_hold(bp, modify); modify->host_src_addr = cpu_to_le64(dma_handle); @@ -3917,7 +3939,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { ETHTOOL_COALESCE_USECS_IRQ | ETHTOOL_COALESCE_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_STATS_BLOCK_USECS | - ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + ETHTOOL_COALESCE_USE_ADAPTIVE_RX | + ETHTOOL_COALESCE_USE_CQE, .get_link_ksettings = bnxt_get_link_ksettings, .set_link_ksettings = bnxt_set_link_ksettings, .get_fec_stats = bnxt_get_fec_stats, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index bb7327b82d0b..566c9487ef55 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -359,6 +359,8 @@ static int __hwrm_to_stderr(u32 hwrm_err) return -EAGAIN; case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: return -EOPNOTSUPP; + case HWRM_ERR_CODE_PF_UNAVAILABLE: + return -ENODEV; default: return -EIO; } @@ -416,6 +418,44 @@ hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state) netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); } +static void hwrm_req_dbg(struct bnxt *bp, struct input *req) +{ + u32 ring = le16_to_cpu(req->cmpl_ring); + u32 type = le16_to_cpu(req->req_type); + u32 tgt = le16_to_cpu(req->target_id); + u32 seq = le16_to_cpu(req->seq_id); + char opt[32] = "\n"; + + if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING)) + snprintf(opt, 16, " ring %d\n", ring); + + if (unlikely(tgt != BNXT_HWRM_TARGET)) + snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt); + + netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s", + type, seq, opt); +} + +#define hwrm_err(bp, ctx, fmt, ...) 
\ + do { \ + if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \ + netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \ + else \ + netdev_err((bp)->dev, fmt, __VA_ARGS__); \ + } while (0) + +static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status) +{ + if (req_type == HWRM_VER_GET) + return false; + + if (!bp->fw_health || !bp->fw_health->status_reliable) + return false; + + *fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status); +} + static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) { u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; @@ -427,8 +467,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) unsigned int i, timeout, tmo_count; u32 *data = (u32 *)ctx->req; u32 msg_len = ctx->req_len; + u32 req_type, sts; int rc = -EBUSY; - u32 req_type; u16 len = 0; u8 *valid; @@ -436,8 +476,11 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) memset(ctx->resp, 0, PAGE_SIZE); req_type = le16_to_cpu(ctx->req->req_type); - if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) + if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) { + netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n", + req_type); goto exit; + } if (msg_len > BNXT_HWRM_MAX_REQ_LEN && msg_len > bp->hwrm_max_ext_req_len) { @@ -490,13 +533,15 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) /* Ring channel doorbell */ writel(1, bp->bar0 + doorbell_offset); + hwrm_req_dbg(bp, ctx->req); + if (!pci_is_enabled(bp->pdev)) { rc = -ENODEV; goto exit; } /* Limit timeout to an upper limit */ - timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT); + timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT); /* convert timeout to usec */ timeout *= 1000; @@ -523,17 +568,19 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); } else { - if (HWRM_WAIT_MUST_ABORT(bp, ctx)) - break; + if (hwrm_wait_must_abort(bp, req_type, &sts)) { + hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n", + req_type, sts); + goto exit; + } usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT); } } if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) { - if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - le16_to_cpu(ctx->req->req_type)); + hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n", + req_type); goto exit; } len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); @@ -565,7 +612,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) if (resp_seq != seen_out_of_seq) { netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n", le16_to_cpu(resp_seq), - le16_to_cpu(ctx->req->req_type), + req_type, le16_to_cpu(ctx->req->seq_id)); seen_out_of_seq = resp_seq; } @@ -576,20 +623,22 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); } else { - if (HWRM_WAIT_MUST_ABORT(bp, ctx)) - goto timeout_abort; + if (hwrm_wait_must_abort(bp, req_type, &sts)) { + hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n", + req_type, + le16_to_cpu(ctx->req->seq_id), + len, sts); + goto exit; + } usleep_range(HWRM_MIN_TIMEOUT, HWRM_MAX_TIMEOUT); } } if (i >= tmo_count) { -timeout_abort: - if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) - netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n", 
- hwrm_total_timeout(i), - le16_to_cpu(ctx->req->req_type), - le16_to_cpu(ctx->req->seq_id), len); + hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n", + hwrm_total_timeout(i), req_type, + le16_to_cpu(ctx->req->seq_id), len); goto exit; } @@ -604,12 +653,9 @@ timeout_abort: } if (j >= HWRM_VALID_BIT_DELAY_USEC) { - if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) - netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", - hwrm_total_timeout(i), - le16_to_cpu(ctx->req->req_type), - le16_to_cpu(ctx->req->seq_id), len, - *valid); + hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", + hwrm_total_timeout(i), req_type, + le16_to_cpu(ctx->req->seq_id), len, *valid); goto exit; } } @@ -620,11 +666,12 @@ timeout_abort: */ *valid = 0; rc = le16_to_cpu(ctx->resp->error_code); - if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) { - netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - le16_to_cpu(ctx->resp->req_type), - le16_to_cpu(ctx->resp->seq_id), rc); - } + if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n", + req_type); + else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE) + hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", + req_type, token->seq_id, rc); rc = __hwrm_to_stderr(rc); exit: if (token) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h index 4d17f0d5363b..d52bd2d63aec 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -58,11 +58,10 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); #define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) -#define HWRM_CMD_MAX_TIMEOUT 40000 +#define HWRM_CMD_MAX_TIMEOUT 40000U #define SHORT_HWRM_CMD_TIMEOUT 20 #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) -#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) #define BNXT_HWRM_TARGET 0xffff #define BNXT_HWRM_NO_CMPL_RING -1 #define BNXT_HWRM_REQ_MAX_SIZE 128 @@ -83,10 +82,6 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); #define HWRM_MIN_TIMEOUT 25 #define HWRM_MAX_TIMEOUT 40 -#define HWRM_WAIT_MUST_ABORT(bp, ctx) \ - (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \ - !bnxt_is_fw_healthy(bp)) - static inline unsigned int hwrm_total_timeout(unsigned int n) { return n <= HWRM_SHORT_TIMEOUT_COUNTER ? 
n * HWRM_SHORT_MIN_TIMEOUT : diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 8388be119f9a..48520967746f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -417,9 +417,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) return -EFAULT; - if (stmpconf.flags) - return -EINVAL; - if (stmpconf.tx_type != HWTSTAMP_TX_ON && stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index e6a4a768b10b..d8afcf8d6b30 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1868,7 +1868,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type, struct flow_cls_offload *flower = type_data; struct bnxt *bp = priv->bp; - if (flower->common.chain_index) + if (!tc_cls_can_offload_and_chain0(bp->dev, type_data)) return -EOPNOTSUPP; switch (type) { @@ -1962,7 +1962,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { - if (!bnxt_is_netdev_indr_offload(netdev)) + if (!netdev || !bnxt_is_netdev_indr_offload(netdev)) return -EOPNOTSUPP; switch (type) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index c8083df5e0ab..52fad0fdeacf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -195,7 +195,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, *event |= BNXT_REDIRECT_EVENT; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(bp->dev, xdp_prog, act); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 5f259641437a..c888ddee1fc4 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -589,9 +589,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) * Internal or external PHY with MDIO access */ phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); - if (!phydev) { + if (IS_ERR(phydev)) { dev_err(kdev, "failed to register PHY device\n"); - return -ENODEV; + return PTR_ERR(phydev); } } else { /* diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 85ca3909859d..c28f8cc00d1c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12390,7 +12390,10 @@ static int tg3_nway_reset(struct net_device *dev) return r; } -static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +static void tg3_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev); @@ -12411,7 +12414,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * ering->tx_pending = tp->napi[0].tx_pending; } -static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +static int tg3_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct 
netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev); int i, irq_sync = 0, err = 0; @@ -13800,9 +13806,6 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) return -EFAULT; - if (stmpconf.flags) - return -EINVAL; - if (stmpconf.tx_type != HWTSTAMP_TX_ON && stmpconf.tx_type != HWTSTAMP_TX_OFF) return -ERANGE; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index bbdc829c3524..f1d2c4cd5da2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3421,7 +3421,7 @@ static const struct net_device_ops bnad_netdev_ops = { }; static void -bnad_netdev_init(struct bnad *bnad, bool using_dac) +bnad_netdev_init(struct bnad *bnad) { struct net_device *netdev = bnad->netdev; @@ -3434,10 +3434,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; - - if (using_dac) - netdev->features |= NETIF_F_HIGHDMA; + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HIGHDMA; netdev->mem_start = bnad->mmio_start; netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; @@ -3544,8 +3542,7 @@ bnad_lock_uninit(struct bnad *bnad) /* PCI Initialization */ static int -bnad_pci_init(struct bnad *bnad, - struct pci_dev *pdev, bool *using_dac) +bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev) { int err; @@ -3555,14 +3552,9 @@ bnad_pci_init(struct bnad *bnad, err = pci_request_regions(pdev, BNAD_NAME); if (err) goto disable_device; - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { - *using_dac = true; - } else { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) - goto release_regions; - *using_dac = false; - } + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + goto release_regions; pci_set_master(pdev); return 0; @@ -3585,7 +3577,6 @@ static int bnad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pcidev_id) { - bool using_dac; int err; struct bnad *bnad; struct bna *bna; @@ -3615,13 +3606,8 @@ bnad_pci_probe(struct pci_dev *pdev, bnad->id = atomic_inc_return(&bna_id) - 1; mutex_lock(&bnad->conf_mutex); - /* - * PCI initialization - * Output : using_dac = 1 for 64 bit DMA - * = 0 for 32 bit DMA - */ - using_dac = false; - err = bnad_pci_init(bnad, pdev, &using_dac); + /* PCI initialization */ + err = bnad_pci_init(bnad, pdev); if (err) goto unlock_mutex; @@ -3634,7 +3620,7 @@ bnad_pci_probe(struct pci_dev *pdev, goto pci_uninit; /* Initialize netdev structure, set up ethtool ops */ - bnad_netdev_init(bnad, using_dac); + bnad_netdev_init(bnad); /* Set link to down state */ netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 391b85f25141..8aca768571b2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -235,13 +235,18 @@ static int bnad_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - u32 supported, advertising; - - supported = SUPPORTED_10000baseT_Full; - advertising = ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full); + 
ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full); cmd->base.autoneg = AUTONEG_DISABLE; - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; cmd->base.phy_address = 0; @@ -253,11 +258,6 @@ bnad_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - return 0; } @@ -405,7 +405,9 @@ static int bnad_set_coalesce(struct net_device *netdev, static void bnad_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ringparam) + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *kernel_ringparam, + struct netlink_ext_ack *extack) { struct bnad *bnad = netdev_priv(netdev); @@ -418,7 +420,9 @@ bnad_get_ringparam(struct net_device *netdev, static int bnad_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ringparam) + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *kernel_ringparam, + struct netlink_ext_ack *extack) { int i, current_err, err = 0; struct bnad *bnad = netdev_priv(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 5620b97b3482..9ddbee7de72b 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -1271,7 +1271,8 @@ struct macb { struct mii_bus *mii_bus; struct phylink *phylink; struct phylink_config phylink_config; - struct phylink_pcs phylink_pcs; + struct phylink_pcs phylink_usx_pcs; + struct phylink_pcs phylink_sgmii_pcs; u32 caps; unsigned int dma_burst_length; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ffce528aa00e..a363da928e8b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -506,79 +506,11 @@ static void macb_set_tx_clk(struct macb *bp, int speed) netdev_err(bp->dev, "adjusting tx_clk failed.\n"); } -static void macb_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct net_device *ndev = to_net_dev(config->dev); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - struct macb *bp = netdev_priv(ndev); - - /* We only support MII, RMII, GMII, RGMII & SGMII. 
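
The bnad_get_link_ksettings() conversion just above drops the legacy u32 SUPPORTED_*/ADVERTISED_* masks in favor of per-mode bitmap helpers, which is what lets it advertise distinct 10G CR/SR/LR modes at all. A minimal sketch of the same pattern for a hypothetical fixed-10G fibre driver (the driver name and the mode set are illustrative assumptions, not taken from this patch):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int foo_get_link_ksettings(struct net_device *netdev,
                                      struct ethtool_link_ksettings *cmd)
    {
            /* Start from empty bitmaps and add modes one by one; no
             * conversion from legacy u32 masks is needed anymore.
             */
            ethtool_link_ksettings_zero_link_mode(cmd, supported);
            ethtool_link_ksettings_zero_link_mode(cmd, advertising);

            ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
            ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
            ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
            ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);

            cmd->base.autoneg = AUTONEG_DISABLE;
            cmd->base.port = PORT_FIBRE;
            return 0;
    }
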
*/ - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_RMII && - state->interface != PHY_INTERFACE_MODE_GMII && - state->interface != PHY_INTERFACE_MODE_SGMII && - state->interface != PHY_INTERFACE_MODE_10GBASER && - !phy_interface_mode_is_rgmii(state->interface)) { - linkmode_zero(supported); - return; - } - - if (!macb_is_gem(bp) && - (state->interface == PHY_INTERFACE_MODE_GMII || - phy_interface_mode_is_rgmii(state->interface))) { - linkmode_zero(supported); - return; - } - - if (state->interface == PHY_INTERFACE_MODE_10GBASER && - !(bp->caps & MACB_CAPS_HIGH_SPEED && - bp->caps & MACB_CAPS_PCS)) { - linkmode_zero(supported); - return; - } - - phylink_set_port_modes(mask); - phylink_set(mask, Autoneg); - phylink_set(mask, Asym_Pause); - - if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && - (state->interface == PHY_INTERFACE_MODE_NA || - state->interface == PHY_INTERFACE_MODE_10GBASER)) { - phylink_set_10g_modes(mask); - phylink_set(mask, 10000baseKR_Full); - if (state->interface != PHY_INTERFACE_MODE_NA) - goto out; - } - - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - - if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && - (state->interface == PHY_INTERFACE_MODE_NA || - state->interface == PHY_INTERFACE_MODE_GMII || - state->interface == PHY_INTERFACE_MODE_SGMII || - phy_interface_mode_is_rgmii(state->interface))) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - - if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) - phylink_set(mask, 1000baseT_Half); - } -out: - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -} - static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex) { - struct macb *bp = container_of(pcs, struct macb, phylink_pcs); + struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); u32 config; config = gem_readl(bp, USX_CONTROL); @@ -592,7 +524,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, static void macb_usx_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { - struct macb *bp = container_of(pcs, struct macb, phylink_pcs); + struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); u32 val; state->speed = SPEED_10000; @@ -612,7 +544,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs, const unsigned long *advertising, bool permit_pause_to_mac) { - struct macb *bp = container_of(pcs, struct macb, phylink_pcs); + struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) | GEM_BIT(SIGNAL_OK)); @@ -795,28 +727,23 @@ static void macb_mac_link_up(struct phylink_config *config, netif_tx_wake_all_queues(ndev); } -static int macb_mac_prepare(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) +static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) { struct net_device *ndev = to_net_dev(config->dev); struct macb *bp = netdev_priv(ndev); if (interface == PHY_INTERFACE_MODE_10GBASER) - bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops; + return &bp->phylink_usx_pcs; else if (interface == PHY_INTERFACE_MODE_SGMII) - bp->phylink_pcs.ops = &macb_phylink_pcs_ops; + return &bp->phylink_sgmii_pcs; else - bp->phylink_pcs.ops = NULL; - - if 
(bp->phylink_pcs.ops) - phylink_set_pcs(bp->phylink, &bp->phylink_pcs); - - return 0; + return NULL; } static const struct phylink_mac_ops macb_phylink_ops = { - .validate = macb_validate, - .mac_prepare = macb_mac_prepare, + .validate = phylink_generic_validate, + .mac_select_pcs = macb_mac_select_pcs, .mac_config = macb_mac_config, .mac_link_down = macb_mac_link_down, .mac_link_up = macb_mac_link_up, @@ -874,6 +801,9 @@ static int macb_mii_probe(struct net_device *dev) { struct macb *bp = netdev_priv(dev); + bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops; + bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops; + bp->phylink_config.dev = &dev->dev; bp->phylink_config.type = PHYLINK_NETDEV; @@ -882,6 +812,35 @@ static int macb_mii_probe(struct net_device *dev) bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; } + bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_10 | MAC_100; + + __set_bit(PHY_INTERFACE_MODE_MII, + bp->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, + bp->phylink_config.supported_interfaces); + + /* Determine what modes are supported */ + if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) { + bp->phylink_config.mac_capabilities |= MAC_1000FD; + if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) + bp->phylink_config.mac_capabilities |= MAC_1000HD; + + __set_bit(PHY_INTERFACE_MODE_GMII, + bp->phylink_config.supported_interfaces); + phy_interface_set_rgmii(bp->phylink_config.supported_interfaces); + + if (bp->caps & MACB_CAPS_PCS) + __set_bit(PHY_INTERFACE_MODE_SGMII, + bp->phylink_config.supported_interfaces); + + if (bp->caps & MACB_CAPS_HIGH_SPEED) { + __set_bit(PHY_INTERFACE_MODE_10GBASER, + bp->phylink_config.supported_interfaces); + bp->phylink_config.mac_capabilities |= MAC_10000FD; + } + } + bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, bp->phy_interface, &macb_phylink_ops); if (IS_ERR(bp->phylink)) { @@ -3101,7 +3060,9 @@ static int macb_set_link_ksettings(struct net_device *netdev, } static void macb_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct macb *bp = netdev_priv(netdev); @@ -3113,7 +3074,9 @@ static void macb_get_ringparam(struct net_device *netdev, } static int macb_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct macb *bp = netdev_priv(netdev); u32 new_rx_size, new_tx_size; diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 095c5a2144a7..fb6b27f46b15 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -464,10 +464,6 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd) sizeof(*tstamp_config))) return -EFAULT; - /* reserved for future extensions */ - if (tstamp_config->flags) - return -EINVAL; - switch (tstamp_config->tx_type) { case HWTSTAMP_TX_OFF: break; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 2b9747867d4c..2c10ae3f7fc1 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -947,7 +947,9 @@ static int lio_set_phys_id(struct net_device *netdev, static void lio_ethtool_get_ringparam(struct net_device 
*netdev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; @@ -1252,8 +1254,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) return 0; } -static int lio_ethtool_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ering) +static int +lio_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { u32 rx_count, tx_count, rx_count_old, tx_count_old; struct lio *lio = GET_LIO(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 12eee2bc7f5c..ba28aa444e5a 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2114,9 +2114,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) return -EFAULT; - if (conf.flags) - return -EINVAL; - switch (conf.tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index c607756b731f..568f211d91cc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1254,9 +1254,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) return -EFAULT; - if (conf.flags) - return -EINVAL; - switch (conf.tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 4e39d712e121..103591dcea1c 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -548,7 +548,7 @@ struct octeon_mgmt_cam_state { }; static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, - unsigned char *addr) + const unsigned char *addr) { int i; @@ -702,9 +702,6 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, if (copy_from_user(&config, rq->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) /* reserved for future extensions */ - return -EINVAL; - /* Check the status of hardware for tiemstamps */ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* Get the current state of the PTP clock */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 7f2882109b16..5a9fad61e9ea 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -467,7 +467,9 @@ static int nicvf_get_coalesce(struct net_device *netdev, } static void nicvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; @@ -479,7 +481,9 @@ static void nicvf_get_ringparam(struct net_device *netdev, } static int nicvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nicvf 
*nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index bb45d5df2856..a04aa206fddc 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -590,7 +590,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); return true; default: - bpf_warn_invalid_xdp_action(action); + bpf_warn_invalid_xdp_action(nic->netdev, prog, action); fallthrough; case XDP_ABORTED: trace_xdp_exception(nic->netdev, prog, action); @@ -1917,10 +1917,6 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 50bbe79fb93d..4367edbdd579 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -10,6 +10,7 @@ #include <linux/iommu.h> #include <net/ip.h> #include <net/tso.h> +#include <uapi/linux/bpf.h> #include "nic_reg.h" #include "nic.h" diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 609820e214a3..f4054d2553ea 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -710,7 +710,9 @@ static int set_pauseparam(struct net_device *dev, return 0; } -static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e, + struct kernel_ethtool_ringparam *kernel_e, + struct netlink_ext_ack *extack) { struct adapter *adapter = dev->ml_priv; int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; @@ -724,7 +726,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) e->tx_pending = adapter->params.sge.cmdQ_size[0]; } -static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e, + struct kernel_ethtool_ringparam *kernel_e, + struct netlink_ext_ack *extack) { struct adapter *adapter = dev->ml_priv; int jumbo_fl = t1_is_T1B(adapter) ? 
1 : 0; @@ -940,11 +944,11 @@ static const struct net_device_ops cxgb_netdev_ops = { static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int i, err, pci_using_dac = 0; unsigned long mmio_start, mmio_len; const struct board_info *bi; struct adapter *adapter = NULL; struct port_info *pi; + int i, err; err = pci_enable_device(pdev); if (err) @@ -957,17 +961,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_pdev; } - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - - if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n", - pci_name(pdev)); - err = -ENODEV; - goto out_disable_pdev; - } - - } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { pr_err("%s: no usable DMA configuration\n", pci_name(pdev)); goto out_disable_pdev; } @@ -1039,10 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_RXCSUM | NETIF_F_LLTX; + NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; if (vlan_tso_capable(adapter)) { netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index cda01f22c71c..12e76fd0ae91 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1359,7 +1359,7 @@ static void restart_sched(struct tasklet_struct *t) * @fl: the free list that contains the packet buffer * @len: the packet length * - * Process an ingress ethernet pakcet and deliver it to the stack. + * Process an ingress ethernet packet and deliver it to the stack. 
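
The cxgb2 init_one() hunk above collapses the old 64-bit-then-32-bit DMA mask dance into a single dma_set_mask_and_coherent() call and makes NETIF_F_HIGHDMA unconditional; the series leans on the DMA API accepting a 64-bit mask on any platform where the device can usefully do DMA at all. A hedged sketch of the resulting probe-time shape (foo_probe and its error path are illustrative):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;

            /* One call sets both the streaming and coherent masks;
             * there is no longer a 32-bit fallback branch to maintain.
             */
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
            if (err) {
                    dev_err(&pdev->dev, "no usable DMA configuration\n");
                    pci_disable_device(pdev);
                    return err;
            }

            /* ... rest of probe elided in this sketch ... */
            return 0;
    }
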
*/ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) { diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index bfffcaeee624..63521312cb90 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1948,7 +1948,9 @@ static int set_pauseparam(struct net_device *dev, return 0; } -static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e, + struct kernel_ethtool_ringparam *kernel_e, + struct netlink_ext_ack *extack) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -1964,7 +1966,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) e->tx_pending = q->txq_size[0]; } -static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e, + struct kernel_ethtool_ringparam *kernel_e, + struct netlink_ext_ack *extack) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -3200,7 +3204,7 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev) NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int i, err, pci_using_dac = 0; + int i, err; resource_size_t mmio_start, mmio_len; const struct adapter_info *ai; struct adapter *adapter = NULL; @@ -3227,9 +3231,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_device; } - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_release_regions; } @@ -3305,8 +3308,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX; netdev->vlan_features |= netdev->features & VLAN_FEAT; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; + + netdev->features |= NETIF_F_HIGHDMA; netdev->netdev_ops = &cxgb_netdev_ops; netdev->ethtool_ops = &cxgb_ethtool_ops; diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index c3afec1041f8..62dfbdd33365 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -126,8 +126,10 @@ struct rsp_desc { /* response queue descriptor */ struct rss_header rss_hdr; __be32 flags; __be32 len_cq; - u8 imm_data[47]; - u8 intr_gen; + struct_group(immediate, + u8 imm_data[47]; + u8 intr_gen; + ); }; /* @@ -925,7 +927,8 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) if (skb) { __skb_put(skb, IMMED_PKT_SIZE); - skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); + BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate)); + skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE); } return skb; } @@ -1953,7 +1956,7 @@ static int ofld_poll(struct napi_struct *napi, int budget) * @rx_gather: a gather list of packets if we are building a bundle * @gather_idx: index of the next available slot in the bundle * - * Process an ingress offload pakcet and add it to the offload ingress + * Process an ingress offload packet and add it to the offload ingress * queue. 
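
The rsp_desc change above wraps imm_data[] and intr_gen in struct_group() so that get_imm_packet() can copy IMMED_PKT_SIZE bytes through one named member instead of reading past the end of imm_data[], which FORTIFY_SOURCE and -Warray-bounds would flag. A self-contained sketch of the mechanism, using a hypothetical descriptor layout:

    #include <linux/stddef.h>       /* struct_group() */
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/bug.h>

    struct foo_desc {                       /* hypothetical layout */
            __be32 flags;
            struct_group(tail,
                    u8 data[12];
                    u8 marker;
            );
    };

    static void foo_copy_tail(void *dst, const struct foo_desc *desc)
    {
            /* Members stay addressable individually (desc->data,
             * desc->marker) and as one 13-byte region (desc->tail).
             */
            BUILD_BUG_ON(sizeof(desc->tail) != 13);
            memcpy(dst, &desc->tail, sizeof(desc->tail));
    }
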
Returns the index of the next available slot in the bundle. */ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, @@ -2079,7 +2082,7 @@ static void cxgb3_process_iscsi_prov_pack(struct port_info *pi, * @pad: padding * @lro: large receive offload * - * Process an ingress ethernet pakcet and deliver it to the stack. + * Process an ingress ethernet packet and deliver it to the stack. * The padding is 2 if the packet was delivered in an Rx buffer and 0 * if it was immediate data in a response. */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 129352bbe114..6c790af92170 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -890,7 +890,9 @@ static int set_pauseparam(struct net_device *dev, return 0; } -static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e, + struct kernel_ethtool_ringparam *kernel_e, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); const struct sge *s = &pi->adapter->sge; @@ -906,7 +908,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) e->tx_pending = s->ethtxq[pi->first_qset].q.size; } -static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e, + struct kernel_ethtool_ringparam *kernel_e, + struct netlink_ext_ack *extack) { int i; const struct port_info *pi = netdev_priv(dev); @@ -1989,6 +1993,15 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump, return 0; } +static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type) +{ + /* Read port module EEPROM as long as it is plugged-in and + * safe to read. 
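
Every get/set_ringparam implementation in this series gains the same two parameters: a struct kernel_ethtool_ringparam for fields that do not fit the legacy UAPI struct, and a netlink extack for precise error reporting. A sketch of how a driver might use the extack when rejecting a request (foo_priv and the power-of-two rule are assumptions for illustration):

    #include <linux/ethtool.h>
    #include <linux/log2.h>
    #include <linux/netlink.h>
    #include <linux/netdevice.h>

    struct foo_priv {                       /* hypothetical private data */
            u32 rx_ring_size;
    };

    static int foo_set_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *ring,
                                 struct kernel_ethtool_ringparam *kernel_ring,
                                 struct netlink_ext_ack *extack)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (!is_power_of_2(ring->rx_pending)) {
                    NL_SET_ERR_MSG_MOD(extack,
                                       "rx ring size must be a power of 2");
                    return -EINVAL;
            }

            priv->rx_ring_size = ring->rx_pending;  /* applied on next open */
            return 0;
    }
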
+ */ + return (fw_mod_type != FW_PORT_MOD_TYPE_NONE && + fw_mod_type != FW_PORT_MOD_TYPE_ERROR); +} + static int cxgb4_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { @@ -1997,7 +2010,7 @@ static int cxgb4_get_module_info(struct net_device *dev, struct adapter *adapter = pi->adapter; int ret; - if (!t4_is_inserted_mod_type(pi->mod_type)) + if (!cxgb4_fw_mod_type_info_available(pi->mod_type)) return -EINVAL; switch (pi->port_type) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index dde1cf51d0ab..0c78c0db8937 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6608,7 +6608,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int adap_idx = 1; int s_qpp, qpp, num_seg; struct port_info *pi; - bool highdma = false; enum chip_type chip; void __iomem *regs; int func, chip_ver; @@ -6687,14 +6686,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { - highdma = true; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto out_free_adapter; - } + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_free_adapter; } pci_enable_pcie_error_reporting(pdev); @@ -6823,7 +6818,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_TC | NETIF_F_NTUPLE; + NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA; if (chip_ver > CHELSIO_T5) { netdev->hw_enc_features |= NETIF_F_IP_CSUM | @@ -6841,8 +6836,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; } - if (highdma) - netdev->hw_features |= NETIF_F_HIGHDMA; netdev->features |= netdev->hw_features; netdev->vlan_features = netdev->features & VLAN_FEAT; #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index fa5b596ff23a..f889f404305c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1842,8 +1842,10 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, * (including the VLAN tag) into the header so we reject anything * smaller than that ... 
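
cxgb4_get_module_info() above now gates on cxgb4_fw_mod_type_info_available(), which explicitly refuses both the NONE and the ERROR module states, so a faulted module is treated as unreadable even though it is physically present. Roughly the shape of such a guard in a hypothetical driver (all FOO_* names and helpers are invented here):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    enum { FOO_MOD_TYPE_NONE, FOO_MOD_TYPE_SFP, FOO_MOD_TYPE_ERROR };

    struct foo_port {                       /* hypothetical port state */
            u8 mod_type;
    };

    extern int foo_read_module_eeprom(struct foo_port *pi, u32 offset,
                                      u32 len, u8 *data);  /* hypothetical */

    static int foo_get_module_eeprom(struct net_device *dev,
                                     struct ethtool_eeprom *ee, u8 *data)
    {
            struct foo_port *pi = netdev_priv(dev);

            /* Only touch the module EEPROM while a module is plugged in
             * and not faulted; anything else is unsafe to read.
             */
            if (pi->mod_type == FOO_MOD_TYPE_NONE ||
                pi->mod_type == FOO_MOD_TYPE_ERROR)
                    return -EINVAL;

            return foo_read_module_eeprom(pi, ee->offset, ee->len, data);
    }
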
*/ - fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + - sizeof(wr->ethtype) + sizeof(wr->vlantci); + BUILD_BUG_ON(sizeof(wr->firmware) != + (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + sizeof(wr->vlantci))); + fw_hdr_copy_len = sizeof(wr->firmware); ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len); if (ret) goto out_free; @@ -1924,7 +1926,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, wr->equiq_to_len16 = cpu_to_be32(wr_mid); wr->r3[0] = cpu_to_be32(0); wr->r3[1] = cpu_to_be32(0); - skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len); end = (u64 *)wr + flits; /* If this is a Large Send Offload packet we'll put in an LSO CPL diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0a326c054707..2419459a0b85 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -794,10 +794,12 @@ struct fw_eth_tx_pkt_vm_wr { __be32 op_immdlen; __be32 equiq_to_len16; __be32 r3[2]; - u8 ethmacdst[6]; - u8 ethmacsrc[6]; - __be16 ethtype; - __be16 vlantci; + struct_group(firmware, + u8 ethmacdst[ETH_ALEN]; + u8 ethmacsrc[ETH_ALEN]; + __be16 ethtype; + __be16 vlantci; + ); }; #define FW_CMD_MAX_TIMEOUT 10000 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 64479c464b4e..7de3800437c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1591,7 +1591,9 @@ static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel) * first Queue Set. */ static void cxgb4vf_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); const struct sge *s = &pi->adapter->sge; @@ -1614,7 +1616,9 @@ static void cxgb4vf_get_ringparam(struct net_device *dev, * device -- after vetting them of course! */ static int cxgb4vf_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2895,7 +2899,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, struct net_device *netdev; struct port_info *pi; unsigned int pmask; - int pci_using_dac; int err, pidx; /* @@ -2916,19 +2919,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } /* - * Set up our DMA mask: try for 64-bit address masking first and - * fall back to 32-bit if we can't get 64 bits ... 
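
In the cxgb4 TX path above, the four copied header fields become the wr->firmware group and the hand-summed length is replaced by sizeof(wr->firmware), with a BUILD_BUG_ON() pinning the two together. A sketch of the idiom, with the struct reduced and renamed for illustration:

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>
    #include <linux/stddef.h>
    #include <linux/bug.h>

    struct foo_wr {                         /* hypothetical work request */
            __be32 op_immdlen;
            struct_group(firmware,
                    u8 ethmacdst[ETH_ALEN];
                    u8 ethmacsrc[ETH_ALEN];
                    __be16 ethtype;
                    __be16 vlantci;
            );
    };

    static void foo_fill_fw_hdr(struct foo_wr *wr, const struct sk_buff *skb)
    {
            /* If a grouped field is later added or removed, the build
             * breaks instead of the copy length silently drifting.
             */
            BUILD_BUG_ON(sizeof(wr->firmware) !=
                         2 * ETH_ALEN + 2 * sizeof(__be16));
            skb_copy_from_linear_data(skb, &wr->firmware,
                                      sizeof(wr->firmware));
    }
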
+ * Set up our DMA mask */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (err == 0) { - pci_using_dac = 1; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err != 0) { - dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto err_release_regions; - } - pci_using_dac = 0; + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_release_regions; } /* @@ -3074,9 +3070,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - netdev->features = netdev->hw_features; - if (pci_using_dac) - netdev->features |= NETIF_F_HIGHDMA; + netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; netdev->vlan_features = netdev->features & VLAN_FEAT; netdev->priv_flags |= IFF_UNICAST_FLT; @@ -3196,6 +3190,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } if (adapter->registered_device_map == 0) { dev_err(&pdev->dev, "could not register any net devices\n"); + err = -EINVAL; goto err_disable_interrupts; } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 0295b2406646..43b2ceb6aa32 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1167,10 +1167,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct cpl_tx_pkt_core *cpl; const struct skb_shared_info *ssi; dma_addr_t addr[MAX_SKB_FRAGS + 1]; - const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + - sizeof(wr->ethmacsrc) + - sizeof(wr->ethtype) + - sizeof(wr->vlantci)); + const size_t fw_hdr_copy_len = sizeof(wr->firmware); /* * The chip minimum packet length is 10 octets but the firmware @@ -1267,7 +1264,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) wr->equiq_to_len16 = cpu_to_be32(wr_mid); wr->r3[0] = cpu_to_be32(0); wr->r3[1] = cpu_to_be32(0); - skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len); end = (u64 *)wr + flits; /* diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 84251b85fc93..21a70b1f0ac5 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -242,12 +242,15 @@ static int mac89x0_device_probe(struct platform_device *pdev) pr_info("No EEPROM, giving up now.\n"); goto out1; } else { + u8 addr[ETH_ALEN]; + for (i = 0; i < ETH_ALEN; i += 2) { /* Big-endian (why??!) 
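
The mac89x0 conversion here stops writing dev->dev_addr bytes directly; it assembles the address in a stack buffer and installs it with eth_hw_addr_set(), as required now that netdev address storage is managed by the core. The general pattern, with an invented register accessor standing in for the device specifics:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    extern u16 foo_readreg(struct net_device *dev, int offset); /* hypothetical */

    static void foo_set_mac_from_regs(struct net_device *dev)
    {
            u8 addr[ETH_ALEN];
            int i;

            for (i = 0; i < ETH_ALEN; i += 2) {
                    u16 word = foo_readreg(dev, i);

                    addr[i] = word >> 8;
                    addr[i + 1] = word & 0xff;
            }

            /* dev->dev_addr is const to drivers now; this is the one
             * sanctioned way to install the address.
             */
            eth_hw_addr_set(dev, addr);
    }
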
*/ unsigned short s = readreg(dev, PP_IA + i); - dev->dev_addr[i] = s >> 8; - dev->dev_addr[i+1] = s & 0xff; + addr[i] = s >> 8; + addr[i+1] = s & 0xff; } + eth_hw_addr_set(dev, addr); } dev->irq = SLOT2IRQ(slot); diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index c67a16a48d62..52aaf1bb5205 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -304,7 +304,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr) static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr) { - if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) { + if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) { net_warn_ratelimited("%s: PCI dma mapping failed!\n", enic->netdev->name); enic->gen_stats.dma_map_error++; diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 6ded4d9fa32a..6c11f9d62526 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -177,7 +177,9 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset, } static void enic_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct enic *enic = netdev_priv(netdev); struct vnic_enet_config *c = &enic->config; @@ -189,7 +191,9 @@ static void enic_get_ringparam(struct net_device *netdev, } static int enic_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct enic *enic = netdev_priv(netdev); struct vnic_enet_config *c = &enic->config; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index aacf141986d5..4db6889b79ba 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2718,26 +2718,14 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * fail to 32-bit. 
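
enic_dma_map_check() above moves from the long-deprecated pci_dma_mapping_error() wrapper to dma_mapping_error() on the underlying struct device. The generic pattern, sketched for an arbitrary TX mapping (function and variable names assumed):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static int foo_map_tx(struct device *dev, struct sk_buff *skb,
                          dma_addr_t *dma)
    {
            *dma = dma_map_single(dev, skb->data, skb_headlen(skb),
                                  DMA_TO_DEVICE);
            /* Every mapping must be checked before the address is handed
             * to hardware; the dma_addr_t error cookie is platform
             * specific and only this helper can decode it.
             */
            if (unlikely(dma_mapping_error(dev, *dma)))
                    return -ENOMEM;
            return 0;
    }
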
*/ - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47)); if (err) { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(dev, "No usable DMA configuration, aborting\n"); goto err_out_release_regions; } - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(dev, "Unable to obtain %u-bit DMA " - "for consistent allocations, aborting\n", 32); - goto err_out_release_regions; - } } else { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47)); - if (err) { - dev_err(dev, "Unable to obtain %u-bit DMA " - "for consistent allocations, aborting\n", 47); - goto err_out_release_regions; - } using_dac = 1; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 941f175fb911..c78b99a497df 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -305,21 +305,21 @@ static void gmac_speed_set(struct net_device *netdev) switch (phydev->speed) { case 1000: status.bits.speed = GMAC_SPEED_1000; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_1000; netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", phydev_name(phydev)); break; case 100: status.bits.speed = GMAC_SPEED_100; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", phydev_name(phydev)); break; case 10: status.bits.speed = GMAC_SPEED_10; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", phydev_name(phydev)); @@ -389,6 +389,9 @@ static int gmac_setup_phy(struct net_device *netdev) status.bits.mii_rmii = GMAC_PHY_GMII; break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: netdev_dbg(netdev, "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; @@ -2105,7 +2108,9 @@ static void gmac_get_pauseparam(struct net_device *netdev, } static void gmac_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); @@ -2123,7 +2128,9 @@ static void gmac_get_ringparam(struct net_device *netdev, } static int gmac_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *rp) + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); int err = 0; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 13121c4dcfe6..71730ef4cd57 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -4709,6 +4709,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) lp->ibn = 3; lp->active = *p++; if (MOTO_SROM_BUG) lp->active = 0; + /* if (MOTO_SROM_BUG) statement indicates lp->active could + * be 8 (i.e. 
the size of array lp->phy) */ + if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy))) + return -EINVAL; lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; @@ -5000,19 +5004,23 @@ mii_get_phy(struct net_device *dev) } if ((j == limit) && (i < DE4X5_MAX_MII)) { for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); - lp->phy[k].addr = i; - lp->phy[k].id = id; - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ - lp->mii_cnt++; - lp->active++; - printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); - j = de4x5_debug; - de4x5_debug |= DEBUG_MII; - de4x5_dbg_mii(dev, k); - de4x5_debug = j; - printk("\n"); + if (k < DE4X5_MAX_PHY) { + lp->phy[k].addr = i; + lp->phy[k].id = id; + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ + lp->mii_cnt++; + lp->active++; + printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); + j = de4x5_debug; + de4x5_debug |= DEBUG_MII; + de4x5_dbg_mii(dev, k); + de4x5_debug = j; + printk("\n"); + } else { + goto purgatory; + } } } purgatory: diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index f9955308b93d..dfa784339781 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -683,7 +683,9 @@ static int be_get_link_ksettings(struct net_device *netdev, } static void be_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct be_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d51f24c9e1b8..ad67b4216079 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5194,7 +5194,8 @@ static void be_netdev_init(struct net_device *netdev) netdev->hw_features |= NETIF_F_RXHASH; netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; @@ -5840,14 +5841,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) SET_NETDEV_DEV(netdev, &pdev->dev); status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (!status) { - netdev->features |= NETIF_F_HIGHDMA; - } else { - status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (status) { - dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); - goto free_netdev; - } + if (status) { + dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); + goto free_netdev; } status = pci_enable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig new file mode 100644 index 000000000000..f4e2b1102d8f --- /dev/null +++ b/drivers/net/ethernet/engleder/Kconfig 
@@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Engleder network device configuration +# + +config NET_VENDOR_ENGLEDER + bool "Engleder devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Engleder devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_ENGLEDER + +config TSNEP + tristate "TSN endpoint support" + depends on HAS_IOMEM && HAS_DMA + depends on PTP_1588_CLOCK_OPTIONAL + select PHYLIB + help + Support for the Engleder TSN endpoint Ethernet MAC IP Core. + + To compile this driver as a module, choose M here. The module will be + called tsnep. + +config TSNEP_SELFTESTS + bool "TSN endpoint self test support" + default n + depends on TSNEP + help + This enables self test support within the TSN endpoint driver. + + If unsure, say N. + +endif # NET_VENDOR_ENGLEDER diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile new file mode 100644 index 000000000000..cce2191cb889 --- /dev/null +++ b/drivers/net/ethernet/engleder/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Engleder Ethernet drivers +# + +obj-$(CONFIG_TSNEP) += tsnep.o + +tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \ + $(tsnep-y) +tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h new file mode 100644 index 000000000000..23bbece6b7de --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +#ifndef _TSNEP_H +#define _TSNEP_H + +#include "tsnep_hw.h" + +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/ethtool.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/miscdevice.h> + +#define TSNEP "tsnep" + +#define TSNEP_RING_SIZE 256 +#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE) +#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE) + +#define TSNEP_QUEUES 1 + +struct tsnep_gcl { + void __iomem *addr; + + u64 base_time; + u64 cycle_time; + u64 cycle_time_extension; + + struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT]; + int count; + + u64 change_limit; + + u64 start_time; + bool change; +}; + +struct tsnep_tx_entry { + struct tsnep_tx_desc *desc; + struct tsnep_tx_desc_wb *desc_wb; + dma_addr_t desc_dma; + bool owner_user_flag; + + u32 properties; + + struct sk_buff *skb; + size_t len; + DEFINE_DMA_UNMAP_ADDR(dma); +}; + +struct tsnep_tx { + struct tsnep_adapter *adapter; + void __iomem *addr; + + void *page[TSNEP_RING_PAGE_COUNT]; + dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT]; + + /* TX ring lock */ + spinlock_t lock; + struct tsnep_tx_entry entry[TSNEP_RING_SIZE]; + int write; + int read; + u32 owner_counter; + int increment_owner_counter; + + u32 packets; + u32 bytes; + u32 dropped; +}; + +struct tsnep_rx_entry { + struct tsnep_rx_desc *desc; + struct tsnep_rx_desc_wb *desc_wb; + dma_addr_t desc_dma; + + u32 properties; + + struct sk_buff *skb; + size_t len; + DEFINE_DMA_UNMAP_ADDR(dma); +}; + +struct tsnep_rx { + struct tsnep_adapter *adapter; + void __iomem *addr; + + 
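+	/* Editor's illustration, not part of the original commit (values
+	 * assume 4 KiB pages and follow from the macros above plus
+	 * TSNEP_DESC_SIZE in tsnep_hw.h): each descriptor slot takes 256
+	 * bytes, so TSNEP_RING_ENTRIES_PER_PAGE = 4096 / 256 = 16 and the
+	 * 256-entry ring needs TSNEP_RING_PAGE_COUNT = 16 coherent pages,
+	 * held in page[]/page_dma[] below.
+	 */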
void *page[TSNEP_RING_PAGE_COUNT]; + dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT]; + + struct tsnep_rx_entry entry[TSNEP_RING_SIZE]; + int read; + u32 owner_counter; + int increment_owner_counter; + + u32 packets; + u32 bytes; + u32 dropped; + u32 multicast; +}; + +struct tsnep_queue { + struct tsnep_adapter *adapter; + + struct tsnep_tx *tx; + struct tsnep_rx *rx; + + struct napi_struct napi; + + u32 irq_mask; +}; + +struct tsnep_adapter { + struct net_device *netdev; + u8 mac_address[ETH_ALEN]; + struct mii_bus *mdiobus; + bool suppress_preamble; + phy_interface_t phy_mode; + struct phy_device *phydev; + int msg_enable; + + struct platform_device *pdev; + struct device *dmadev; + void __iomem *addr; + int irq; + + bool gate_control; + /* gate control lock */ + struct mutex gate_control_lock; + bool gate_control_active; + struct tsnep_gcl gcl[2]; + int next_gcl; + + struct hwtstamp_config hwtstamp_config; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + /* ptp clock lock */ + spinlock_t ptp_lock; + + int num_tx_queues; + struct tsnep_tx tx[TSNEP_MAX_QUEUES]; + int num_rx_queues; + struct tsnep_rx rx[TSNEP_MAX_QUEUES]; + + int num_queues; + struct tsnep_queue queue[TSNEP_MAX_QUEUES]; +}; + +extern const struct ethtool_ops tsnep_ethtool_ops; + +int tsnep_ptp_init(struct tsnep_adapter *adapter); +void tsnep_ptp_cleanup(struct tsnep_adapter *adapter); +int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); + +int tsnep_tc_init(struct tsnep_adapter *adapter); +void tsnep_tc_cleanup(struct tsnep_adapter *adapter); +int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type, + void *type_data); + +#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS) +int tsnep_ethtool_get_test_count(void); +void tsnep_ethtool_get_test_strings(u8 *data); +void tsnep_ethtool_self_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data); +#else +static inline int tsnep_ethtool_get_test_count(void) +{ + return -EOPNOTSUPP; +} + +static inline void tsnep_ethtool_get_test_strings(u8 *data) +{ + /* not enabled */ +} + +static inline void tsnep_ethtool_self_test(struct net_device *dev, + struct ethtool_test *eth_test, + u64 *data) +{ + /* not enabled */ +} +#endif /* CONFIG_TSNEP_SELFTESTS */ + +void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time); + +#endif /* _TSNEP_H */ diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c new file mode 100644 index 000000000000..e6760dc68ddd --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +#include "tsnep.h" + +static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = { + "rx_packets", + "rx_bytes", + "rx_dropped", + "rx_multicast", + "rx_phy_errors", + "rx_forwarded_phy_errors", + "rx_invalid_frame_errors", + "tx_packets", + "tx_bytes", + "tx_dropped", +}; + +struct tsnep_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_multicast; + u64 rx_phy_errors; + u64 rx_forwarded_phy_errors; + u64 rx_invalid_frame_errors; + u64 tx_packets; + u64 tx_bytes; + u64 tx_dropped; +}; + +#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64)) + +static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = { + "rx_%d_packets", + "rx_%d_bytes", + "rx_%d_dropped", + "rx_%d_multicast", + "rx_%d_no_descriptor_errors", + "rx_%d_buffer_too_small_errors", + "rx_%d_fifo_overflow_errors", + 
"rx_%d_invalid_frame_errors", +}; + +struct tsnep_rx_queue_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_multicast; + u64 rx_no_descriptor_errors; + u64 rx_buffer_too_small_errors; + u64 rx_fifo_overflow_errors; + u64 rx_invalid_frame_errors; +}; + +#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \ + sizeof(u64)) + +static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = { + "tx_%d_packets", + "tx_%d_bytes", + "tx_%d_dropped", +}; + +struct tsnep_tx_queue_stats { + u64 tx_packets; + u64 tx_bytes; + u64 tx_dropped; +}; + +#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \ + sizeof(u64)) + +static void tsnep_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver)); + strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev), + sizeof(drvinfo->bus_info)); +} + +static int tsnep_ethtool_get_regs_len(struct net_device *netdev) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + int len; + int num_additional_queues; + + len = TSNEP_MAC_SIZE; + + /* first queue pair is within TSNEP_MAC_SIZE, only queues additional to + * the first queue pair extend the register length by TSNEP_QUEUE_SIZE + */ + num_additional_queues = + max(adapter->num_tx_queues, adapter->num_rx_queues) - 1; + len += TSNEP_QUEUE_SIZE * num_additional_queues; + + return len; +} + +static void tsnep_ethtool_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + regs->version = 1; + + memcpy_fromio(p, adapter->addr, regs->len); +} + +static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + int rx_count = adapter->num_rx_queues; + int tx_count = adapter->num_tx_queues; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings)); + data += sizeof(tsnep_stats_strings); + + for (i = 0; i < rx_count; i++) { + for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) { + snprintf(data, ETH_GSTRING_LEN, + tsnep_rx_queue_stats_strings[j], i); + data += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < tx_count; i++) { + for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) { + snprintf(data, ETH_GSTRING_LEN, + tsnep_tx_queue_stats_strings[j], i); + data += ETH_GSTRING_LEN; + } + } + break; + case ETH_SS_TEST: + tsnep_ethtool_get_test_strings(data); + break; + } +} + +static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + int rx_count = adapter->num_rx_queues; + int tx_count = adapter->num_tx_queues; + struct tsnep_stats tsnep_stats; + struct tsnep_rx_queue_stats tsnep_rx_queue_stats; + struct tsnep_tx_queue_stats tsnep_tx_queue_stats; + u32 reg; + int i; + + memset(&tsnep_stats, 0, sizeof(tsnep_stats)); + for (i = 0; i < adapter->num_rx_queues; i++) { + tsnep_stats.rx_packets += adapter->rx[i].packets; + tsnep_stats.rx_bytes += 
adapter->rx[i].bytes; + tsnep_stats.rx_dropped += adapter->rx[i].dropped; + tsnep_stats.rx_multicast += adapter->rx[i].multicast; + } + reg = ioread32(adapter->addr + ECM_STAT); + tsnep_stats.rx_phy_errors = + (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT; + tsnep_stats.rx_forwarded_phy_errors = + (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT; + tsnep_stats.rx_invalid_frame_errors = + (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT; + for (i = 0; i < adapter->num_tx_queues; i++) { + tsnep_stats.tx_packets += adapter->tx[i].packets; + tsnep_stats.tx_bytes += adapter->tx[i].bytes; + tsnep_stats.tx_dropped += adapter->tx[i].dropped; + } + memcpy(data, &tsnep_stats, sizeof(tsnep_stats)); + data += TSNEP_STATS_COUNT; + + for (i = 0; i < rx_count; i++) { + memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats)); + tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets; + tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes; + tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped; + tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast; + reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + + TSNEP_RX_STATISTIC); + tsnep_rx_queue_stats.rx_no_descriptor_errors = + (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >> + TSNEP_RX_STATISTIC_NO_DESC_SHIFT; + tsnep_rx_queue_stats.rx_buffer_too_small_errors = + (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >> + TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT; + tsnep_rx_queue_stats.rx_fifo_overflow_errors = + (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >> + TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT; + tsnep_rx_queue_stats.rx_invalid_frame_errors = + (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >> + TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT; + memcpy(data, &tsnep_rx_queue_stats, + sizeof(tsnep_rx_queue_stats)); + data += TSNEP_RX_QUEUE_STATS_COUNT; + } + + for (i = 0; i < tx_count; i++) { + memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats)); + tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets; + tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes; + tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped; + memcpy(data, &tsnep_tx_queue_stats, + sizeof(tsnep_tx_queue_stats)); + data += TSNEP_TX_QUEUE_STATS_COUNT; + } +} + +static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + int rx_count; + int tx_count; + + switch (sset) { + case ETH_SS_STATS: + rx_count = adapter->num_rx_queues; + tx_count = adapter->num_tx_queues; + return TSNEP_STATS_COUNT + + TSNEP_RX_QUEUE_STATS_COUNT * rx_count + + TSNEP_TX_QUEUE_STATS_COUNT * tx_count; + case ETH_SS_TEST: + return tsnep_ethtool_get_test_count(); + default: + return -EOPNOTSUPP; + } +} + +static int tsnep_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct tsnep_adapter *adapter = netdev_priv(dev); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +const struct ethtool_ops tsnep_ethtool_ops = { + .get_drvinfo = tsnep_ethtool_get_drvinfo, + .get_regs_len = tsnep_ethtool_get_regs_len, + .get_regs = 
tsnep_ethtool_get_regs, + .get_msglevel = tsnep_ethtool_get_msglevel, + .set_msglevel = tsnep_ethtool_set_msglevel, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .self_test = tsnep_ethtool_self_test, + .get_strings = tsnep_ethtool_get_strings, + .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats, + .get_sset_count = tsnep_ethtool_get_sset_count, + .get_ts_info = tsnep_ethtool_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h new file mode 100644 index 000000000000..71cc8577d640 --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_hw.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +/* Hardware definition of TSNEP and EtherCAT MAC device */ + +#ifndef _TSNEP_HW_H +#define _TSNEP_HW_H + +#include <linux/types.h> + +/* type */ +#define ECM_TYPE 0x0000 +#define ECM_REVISION_MASK 0x000000FF +#define ECM_REVISION_SHIFT 0 +#define ECM_VERSION_MASK 0x0000FF00 +#define ECM_VERSION_SHIFT 8 +#define ECM_QUEUE_COUNT_MASK 0x00070000 +#define ECM_QUEUE_COUNT_SHIFT 16 +#define ECM_GATE_CONTROL 0x02000000 + +/* system time */ +#define ECM_SYSTEM_TIME_LOW 0x0008 +#define ECM_SYSTEM_TIME_HIGH 0x000C + +/* clock */ +#define ECM_CLOCK_RATE 0x0010 +#define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF +#define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000 + +/* interrupt */ +#define ECM_INT_ENABLE 0x0018 +#define ECM_INT_ACTIVE 0x001C +#define ECM_INT_ACKNOWLEDGE 0x001C +#define ECM_INT_LINK 0x00000020 +#define ECM_INT_TX_0 0x00000100 +#define ECM_INT_RX_0 0x00000200 +#define ECM_INT_ALL 0x7FFFFFFF +#define ECM_INT_DISABLE 0x80000000 + +/* reset */ +#define ECM_RESET 0x0020 +#define ECM_RESET_COMMON 0x00000001 +#define ECM_RESET_CHANNEL 0x00000100 +#define ECM_RESET_TXRX 0x00010000 + +/* control and status */ +#define ECM_STATUS 0x0080 +#define ECM_LINK_MODE_OFF 0x01000000 +#define ECM_LINK_MODE_100 0x02000000 +#define ECM_LINK_MODE_1000 0x04000000 +#define ECM_NO_LINK 0x01000000 +#define ECM_LINK_MODE_MASK 0x06000000 + +/* management data */ +#define ECM_MD_CONTROL 0x0084 +#define ECM_MD_STATUS 0x0084 +#define ECM_MD_PREAMBLE 0x00000001 +#define ECM_MD_READ 0x00000004 +#define ECM_MD_WRITE 0x00000002 +#define ECM_MD_ADDR_MASK 0x000000F8 +#define ECM_MD_ADDR_SHIFT 3 +#define ECM_MD_PHY_ADDR_MASK 0x00001F00 +#define ECM_MD_PHY_ADDR_SHIFT 8 +#define ECM_MD_BUSY 0x00000001 +#define ECM_MD_DATA_MASK 0xFFFF0000 +#define ECM_MD_DATA_SHIFT 16 + +/* statistic */ +#define ECM_STAT 0x00B0 +#define ECM_STAT_RX_ERR_MASK 0x000000FF +#define ECM_STAT_RX_ERR_SHIFT 0 +#define ECM_STAT_INV_FRM_MASK 0x0000FF00 +#define ECM_STAT_INV_FRM_SHIFT 8 +#define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000 +#define ECM_STAT_FWD_RX_ERR_SHIFT 16 + +/* tsnep */ +#define TSNEP_MAC_SIZE 0x4000 +#define TSNEP_QUEUE_SIZE 0x1000 +#define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \ + (__n) == 0 ? 
\ + 0 : \ + TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); }) +#define TSNEP_MAX_QUEUES 8 +#define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware supports actually 16k */ +#define TSNEP_DESC_SIZE 256 +#define TSNEP_DESC_OFFSET 128 + +/* tsnep register */ +#define TSNEP_INFO 0x0100 +#define TSNEP_INFO_RX_ASSIGN 0x00010000 +#define TSNEP_INFO_TX_TIME 0x00020000 +#define TSNEP_CONTROL 0x0108 +#define TSNEP_CONTROL_TX_RESET 0x00000001 +#define TSNEP_CONTROL_TX_ENABLE 0x00000002 +#define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010 +#define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020 +#define TSNEP_CONTROL_RX_RESET 0x00000100 +#define TSNEP_CONTROL_RX_ENABLE 0x00000200 +#define TSNEP_CONTROL_RX_DISABLE 0x00000400 +#define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000 +#define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000 +#define TSNEP_TX_DESC_ADDR_LOW 0x0140 +#define TSNEP_TX_DESC_ADDR_HIGH 0x0144 +#define TSNEP_RX_DESC_ADDR_LOW 0x0180 +#define TSNEP_RX_DESC_ADDR_HIGH 0x0184 +#define TSNEP_RESET_OWNER_COUNTER 0x01 +#define TSNEP_RX_STATISTIC 0x0190 +#define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF +#define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0 +#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00 +#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8 +#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000 +#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16 +#define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000 +#define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24 +#define TSNEP_RX_STATISTIC_NO_DESC 0x0190 +#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191 +#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192 +#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193 +#define TSNEP_RX_ASSIGN 0x01A0 +#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001 +#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000 +#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16 +#define TSNEP_MAC_ADDRESS_LOW 0x0800 +#define TSNEP_MAC_ADDRESS_HIGH 0x0804 +#define TSNEP_RX_FILTER 0x0806 +#define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001 +#define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002 +#define TSNEP_GC 0x0808 +#define TSNEP_GC_ENABLE_A 0x00000002 +#define TSNEP_GC_ENABLE_B 0x00000004 +#define TSNEP_GC_DISABLE 0x00000008 +#define TSNEP_GC_ENABLE_TIMEOUT 0x00000010 +#define TSNEP_GC_ACTIVE_A 0x00000002 +#define TSNEP_GC_ACTIVE_B 0x00000004 +#define TSNEP_GC_CHANGE_AB 0x00000008 +#define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010 +#define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020 +#define TSNEP_GC_LIST_ERROR 0x00000080 +#define TSNEP_GC_OPEN 0x00FF0000 +#define TSNEP_GC_OPEN_SHIFT 16 +#define TSNEP_GC_NEXT_OPEN 0xFF000000 +#define TSNEP_GC_NEXT_OPEN_SHIFT 24 +#define TSNEP_GC_TIMEOUT 131072 +#define TSNEP_GC_TIME 0x080C +#define TSNEP_GC_CHANGE 0x0810 +#define TSNEP_GCL_A 0x2000 +#define TSNEP_GCL_B 0x2800 +#define TSNEP_GCL_SIZE SZ_2K + +/* tsnep gate control list operation */ +struct tsnep_gcl_operation { + u32 properties; + u32 interval; +}; + +#define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation)) +#define TSNEP_GCL_MASK 0x000000FF +#define TSNEP_GCL_INSERT 0x20000000 +#define TSNEP_GCL_CHANGE 0x40000000 +#define TSNEP_GCL_LAST 0x80000000 +#define TSNEP_GCL_MIN_INTERVAL 32 + +/* tsnep TX/RX descriptor */ +#define TSNEP_DESC_SIZE 256 +#define TSNEP_DESC_SIZE_DATA_AFTER 2048 +#define TSNEP_DESC_OFFSET 128 +#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000 +#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30 +#define TSNEP_DESC_LENGTH_MASK 0x00003FFF +#define TSNEP_DESC_INTERRUPT_FLAG 0x00040000 +#define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000 +#define 
TSNEP_DESC_NO_LINK_FLAG 0x01000000 + +/* tsnep TX descriptor */ +struct tsnep_tx_desc { + __le32 properties; + __le32 more_properties; + __le32 reserved[2]; + __le64 next; + __le64 tx; +}; + +#define TSNEP_TX_DESC_OWNER_MASK 0xE0000000 +#define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000 +#define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000 +#define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000 + +/* tsnep TX descriptor writeback */ +struct tsnep_tx_desc_wb { + __le32 properties; + __le32 reserved1[3]; + __le64 timestamp; + __le32 dma_delay; + __le32 reserved2; +}; + +#define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000 +#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC +#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2 +#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000 +#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18 +#define TSNEP_TX_DESC_DMA_DELAY_NS 64 + +/* tsnep RX descriptor */ +struct tsnep_rx_desc { + __le32 properties; + __le32 reserved[3]; + __le64 next; + __le64 rx; +}; + +#define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC + +/* tsnep RX descriptor writeback */ +struct tsnep_rx_desc_wb { + __le32 properties; + __le32 reserved[7]; +}; + +/* tsnep RX inline meta */ +struct tsnep_rx_inline { + __le64 reserved; + __le64 timestamp; +}; + +#define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline)) + +#endif /* _TSNEP_HW_H */ diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c new file mode 100644 index 000000000000..904f3304727e --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -0,0 +1,1272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +/* TSN endpoint Ethernet MAC driver + * + * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time + * communication. It is designed for endpoints within TSN (Time Sensitive + * Networking) networks; e.g., for PLCs in the industrial automation case. + * + * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used + * by the driver.
+ * + * More information can be found here: + * - www.embedded-experts.at/tsn + * - www.engleder-embedded.com + */ + +#include "tsnep.h" +#include "tsnep_hw.h" + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/iopoll.h> + +#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \ + TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4)) +#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN) +#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH) + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) +#else +#define DMA_ADDR_HIGH(dma_addr) ((u32)(0)) +#endif +#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF)) + +static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask) +{ + iowrite32(mask, adapter->addr + ECM_INT_ENABLE); +} + +static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask) +{ + mask |= ECM_INT_DISABLE; + iowrite32(mask, adapter->addr + ECM_INT_ENABLE); +} + +static irqreturn_t tsnep_irq(int irq, void *arg) +{ + struct tsnep_adapter *adapter = arg; + u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE); + + /* acknowledge interrupt */ + if (active != 0) + iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE); + + /* handle link interrupt */ + if ((active & ECM_INT_LINK) != 0) { + if (adapter->netdev->phydev) + phy_mac_interrupt(adapter->netdev->phydev); + } + + /* handle TX/RX queue 0 interrupt */ + if ((active & adapter->queue[0].irq_mask) != 0) { + if (adapter->netdev) { + tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); + napi_schedule(&adapter->queue[0].napi); + } + } + + return IRQ_HANDLED; +} + +static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum) +{ + struct tsnep_adapter *adapter = bus->priv; + u32 md; + int retval; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + md = ECM_MD_READ; + if (!adapter->suppress_preamble) + md |= ECM_MD_PREAMBLE; + md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK; + md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK; + iowrite32(md, adapter->addr + ECM_MD_CONTROL); + retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, + !(md & ECM_MD_BUSY), 16, 1000); + if (retval != 0) + return retval; + + return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT; +} + +static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct tsnep_adapter *adapter = bus->priv; + u32 md; + int retval; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + md = ECM_MD_WRITE; + if (!adapter->suppress_preamble) + md |= ECM_MD_PREAMBLE; + md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK; + md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK; + md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK; + iowrite32(md, adapter->addr + ECM_MD_CONTROL); + retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, + !(md & ECM_MD_BUSY), 16, 1000); + if (retval != 0) + return retval; + + return 0; +} + +static void tsnep_phy_link_status_change(struct net_device *netdev) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 mode; + + if (phydev->link) { + switch (phydev->speed) { + case SPEED_100: + mode = ECM_LINK_MODE_100; + break; + case SPEED_1000: + mode = ECM_LINK_MODE_1000; + break; + default: + mode = 
ECM_LINK_MODE_OFF; + break; + } + iowrite32(mode, adapter->addr + ECM_STATUS); + } + + phy_print_status(netdev->phydev); +} + +static int tsnep_phy_open(struct tsnep_adapter *adapter) +{ + struct phy_device *phydev; + struct ethtool_eee ethtool_eee; + int retval; + + retval = phy_connect_direct(adapter->netdev, adapter->phydev, + tsnep_phy_link_status_change, + adapter->phy_mode); + if (retval) + return retval; + phydev = adapter->netdev->phydev; + + /* MAC supports only 100Mbps|1000Mbps full duplex + * SPE (Single Pair Ethernet) is also an option but not implemented yet + */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* disable EEE autoneg, EEE not supported by TSNEP */ + memset(&ethtool_eee, 0, sizeof(ethtool_eee)); + phy_ethtool_set_eee(adapter->phydev, &ethtool_eee); + + adapter->phydev->irq = PHY_MAC_INTERRUPT; + phy_start(adapter->phydev); + + return 0; +} + +static void tsnep_phy_close(struct tsnep_adapter *adapter) +{ + phy_stop(adapter->netdev->phydev); + phy_disconnect(adapter->netdev->phydev); + adapter->netdev->phydev = NULL; +} + +static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) +{ + struct device *dmadev = tx->adapter->dmadev; + int i; + + memset(tx->entry, 0, sizeof(tx->entry)); + + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { + if (tx->page[i]) { + dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], + tx->page_dma[i]); + tx->page[i] = NULL; + tx->page_dma[i] = 0; + } + } +} + +static int tsnep_tx_ring_init(struct tsnep_tx *tx) +{ + struct device *dmadev = tx->adapter->dmadev; + struct tsnep_tx_entry *entry; + struct tsnep_tx_entry *next_entry; + int i, j; + int retval; + + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { + tx->page[i] = + dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], + GFP_KERNEL); + if (!tx->page[i]) { + retval = -ENOMEM; + goto alloc_failed; + } + for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) { + entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; + entry->desc_wb = (struct tsnep_tx_desc_wb *) + (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); + entry->desc = (struct tsnep_tx_desc *) + (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); + entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; + } + } + for (i = 0; i < TSNEP_RING_SIZE; i++) { + entry = &tx->entry[i]; + next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE]; + entry->desc->next = __cpu_to_le64(next_entry->desc_dma); + } + + return 0; + +alloc_failed: + tsnep_tx_ring_cleanup(tx); + return retval; +} + +static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last) +{ + struct tsnep_tx_entry *entry = &tx->entry[index]; + + entry->properties = 0; + if (entry->skb) { + entry->properties = + skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK; + entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; + if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) + entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; + + /* toggle user flag to prevent false acknowledge + * + * Only the first fragment is acknowledged. For all other + * fragments no acknowledge is done and the last written owner + * counter stays in the writeback descriptor. Therefore, it is + * possible that the last written owner counter is identical to + * the new incremented owner counter and a false acknowledge is + * detected before the real acknowledge has been done by + * hardware.
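+ * + * For example (an illustration added here, consistent with the code + * below where the owner counter cycles through the values 1..3): a + * fragment descriptor whose writeback still holds owner counter 2 + * from an earlier pass is never rewritten by hardware, so once the + * counter wraps around to 2 again the stale writeback matches the + * new value and would look like an immediate acknowledge.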
+ * + * The user flag is used to prevent this situation. The user + * flag is copied to the writeback descriptor by the hardware + * and is used as additional acknowledge data. By toggling the + * user flag only for the first fragment (which is + * acknowledged), it is guaranteed that the last acknowledge + * done for this descriptor has used a different user flag and + * cannot be detected as false acknowledge. + */ + entry->owner_user_flag = !entry->owner_user_flag; + } + if (last) + entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG; + if (index == tx->increment_owner_counter) { + tx->owner_counter++; + if (tx->owner_counter == 4) + tx->owner_counter = 1; + tx->increment_owner_counter--; + if (tx->increment_owner_counter < 0) + tx->increment_owner_counter = TSNEP_RING_SIZE - 1; + } + entry->properties |= + (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & + TSNEP_DESC_OWNER_COUNTER_MASK; + if (entry->owner_user_flag) + entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; + entry->desc->more_properties = + __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); + + /* descriptor properties shall be written last, because valid data is + * signaled there + */ + dma_wmb(); + + entry->desc->properties = __cpu_to_le32(entry->properties); +} + +static int tsnep_tx_desc_available(struct tsnep_tx *tx) +{ + if (tx->read <= tx->write) + return TSNEP_RING_SIZE - tx->write + tx->read - 1; + else + return tx->read - tx->write - 1; +} + +static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) +{ + struct device *dmadev = tx->adapter->dmadev; + struct tsnep_tx_entry *entry; + unsigned int len; + dma_addr_t dma; + int i; + + for (i = 0; i < count; i++) { + entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; + + if (i == 0) { + len = skb_headlen(skb); + dma = dma_map_single(dmadev, skb->data, len, + DMA_TO_DEVICE); + } else { + len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); + dma = skb_frag_dma_map(dmadev, + &skb_shinfo(skb)->frags[i - 1], + 0, len, DMA_TO_DEVICE); + } + if (dma_mapping_error(dmadev, dma)) + return -ENOMEM; + + entry->len = len; + dma_unmap_addr_set(entry, dma, dma); + + entry->desc->tx = __cpu_to_le64(dma); + } + + return 0; +} + +static void tsnep_tx_unmap(struct tsnep_tx *tx, int count) +{ + struct device *dmadev = tx->adapter->dmadev; + struct tsnep_tx_entry *entry; + int i; + + for (i = 0; i < count; i++) { + entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE]; + + if (entry->len) { + if (i == 0) + dma_unmap_single(dmadev, + dma_unmap_addr(entry, dma), + dma_unmap_len(entry, len), + DMA_TO_DEVICE); + else + dma_unmap_page(dmadev, + dma_unmap_addr(entry, dma), + dma_unmap_len(entry, len), + DMA_TO_DEVICE); + entry->len = 0; + } + } +} + +static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, + struct tsnep_tx *tx) +{ + unsigned long flags; + int count = 1; + struct tsnep_tx_entry *entry; + int i; + int retval; + + if (skb_shinfo(skb)->nr_frags > 0) + count += skb_shinfo(skb)->nr_frags; + + spin_lock_irqsave(&tx->lock, flags); + + if (tsnep_tx_desc_available(tx) < count) { + /* ring full, shall not happen because queue is stopped if full + * below + */ + netif_stop_queue(tx->adapter->netdev); + + spin_unlock_irqrestore(&tx->lock, flags); + + return NETDEV_TX_BUSY; + } + + entry = &tx->entry[tx->write]; + entry->skb = skb; + + retval = tsnep_tx_map(skb, tx, count); + if (retval != 0) { + tsnep_tx_unmap(tx, count); + dev_kfree_skb_any(entry->skb); + entry->skb = NULL; + + tx->dropped++; + + spin_unlock_irqrestore(&tx->lock, flags); + + 
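+		/* Editorial note: the skb was freed and counted as dropped,
+		 * so NETDEV_TX_OK (and not NETDEV_TX_BUSY) is returned below;
+		 * TX_BUSY would only make the stack requeue an skb that no
+		 * longer exists.
+		 */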
netdev_err(tx->adapter->netdev, "TX DMA map failed\n"); + + return NETDEV_TX_OK; + } + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + for (i = 0; i < count; i++) + tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, + i == (count - 1)); + tx->write = (tx->write + count) % TSNEP_RING_SIZE; + + skb_tx_timestamp(skb); + + /* descriptor properties shall be valid before hardware is notified */ + dma_wmb(); + + iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); + + if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { + /* ring can get full with next frame */ + netif_stop_queue(tx->adapter->netdev); + } + + tx->packets++; + tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN; + + spin_unlock_irqrestore(&tx->lock, flags); + + return NETDEV_TX_OK; +} + +static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) +{ + unsigned long flags; + int budget = 128; + struct tsnep_tx_entry *entry; + int count; + + spin_lock_irqsave(&tx->lock, flags); + + do { + if (tx->read == tx->write) + break; + + entry = &tx->entry[tx->read]; + if ((__le32_to_cpu(entry->desc_wb->properties) & + TSNEP_TX_DESC_OWNER_MASK) != + (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) + break; + + /* descriptor properties shall be read first, because valid data + * is signaled there + */ + dma_rmb(); + + count = 1; + if (skb_shinfo(entry->skb)->nr_frags > 0) + count += skb_shinfo(entry->skb)->nr_frags; + + tsnep_tx_unmap(tx, count); + + if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && + (__le32_to_cpu(entry->desc_wb->properties) & + TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { + struct skb_shared_hwtstamps hwtstamps; + u64 timestamp = + __le64_to_cpu(entry->desc_wb->timestamp); + + memset(&hwtstamps, 0, sizeof(hwtstamps)); + hwtstamps.hwtstamp = ns_to_ktime(timestamp); + + skb_tstamp_tx(entry->skb, &hwtstamps); + } + + napi_consume_skb(entry->skb, budget); + entry->skb = NULL; + + tx->read = (tx->read + count) % TSNEP_RING_SIZE; + + budget--; + } while (likely(budget)); + + if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && + netif_queue_stopped(tx->adapter->netdev)) { + netif_wake_queue(tx->adapter->netdev); + } + + spin_unlock_irqrestore(&tx->lock, flags); + + return (budget != 0); +} + +static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr, + struct tsnep_tx *tx) +{ + dma_addr_t dma; + int retval; + + memset(tx, 0, sizeof(*tx)); + tx->adapter = adapter; + tx->addr = addr; + + retval = tsnep_tx_ring_init(tx); + if (retval) + return retval; + + dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; + iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); + iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); + tx->owner_counter = 1; + tx->increment_owner_counter = TSNEP_RING_SIZE - 1; + + spin_lock_init(&tx->lock); + + return 0; +} + +static void tsnep_tx_close(struct tsnep_tx *tx) +{ + u32 val; + + readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, + ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000, + 1000000); + + tsnep_tx_ring_cleanup(tx); +} + +static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) +{ + struct device *dmadev = rx->adapter->dmadev; + struct tsnep_rx_entry *entry; + int i; + + for (i = 0; i < TSNEP_RING_SIZE; i++) { + entry = &rx->entry[i]; + if (dma_unmap_addr(entry, dma)) + dma_unmap_single(dmadev, dma_unmap_addr(entry, dma), + dma_unmap_len(entry, len), + DMA_FROM_DEVICE); + if (entry->skb) + dev_kfree_skb(entry->skb); + } + + memset(rx->entry, 0, 
sizeof(rx->entry)); + + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { + if (rx->page[i]) { + dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], + rx->page_dma[i]); + rx->page[i] = NULL; + rx->page_dma[i] = 0; + } + } +} + +static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx, + struct tsnep_rx_entry *entry) +{ + struct device *dmadev = rx->adapter->dmadev; + struct sk_buff *skb; + dma_addr_t dma; + + skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH, + GFP_ATOMIC | GFP_DMA); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, RX_SKB_RESERVE); + + dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH, + DMA_FROM_DEVICE); + if (dma_mapping_error(dmadev, dma)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + entry->skb = skb; + entry->len = RX_SKB_LENGTH; + dma_unmap_addr_set(entry, dma, dma); + entry->desc->rx = __cpu_to_le64(dma); + + return 0; +} + +static int tsnep_rx_ring_init(struct tsnep_rx *rx) +{ + struct device *dmadev = rx->adapter->dmadev; + struct tsnep_rx_entry *entry; + struct tsnep_rx_entry *next_entry; + int i, j; + int retval; + + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { + rx->page[i] = + dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], + GFP_KERNEL); + if (!rx->page[i]) { + retval = -ENOMEM; + goto failed; + } + for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) { + entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; + entry->desc_wb = (struct tsnep_rx_desc_wb *) + (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); + entry->desc = (struct tsnep_rx_desc *) + (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); + entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; + } + } + for (i = 0; i < TSNEP_RING_SIZE; i++) { + entry = &rx->entry[i]; + next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE]; + entry->desc->next = __cpu_to_le64(next_entry->desc_dma); + + retval = tsnep_rx_alloc_and_map_skb(rx, entry); + if (retval) + goto failed; + } + + return 0; + +failed: + tsnep_rx_ring_cleanup(rx); + return retval; +} + +static void tsnep_rx_activate(struct tsnep_rx *rx, int index) +{ + struct tsnep_rx_entry *entry = &rx->entry[index]; + + /* RX_SKB_LENGTH is a multiple of 4 */ + entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; + entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; + if (index == rx->increment_owner_counter) { + rx->owner_counter++; + if (rx->owner_counter == 4) + rx->owner_counter = 1; + rx->increment_owner_counter--; + if (rx->increment_owner_counter < 0) + rx->increment_owner_counter = TSNEP_RING_SIZE - 1; + } + entry->properties |= + (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & + TSNEP_DESC_OWNER_COUNTER_MASK; + + /* descriptor properties shall be written last, because valid data is + * signaled there + */ + dma_wmb(); + + entry->desc->properties = __cpu_to_le32(entry->properties); +} + +static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, + int budget) +{ + struct device *dmadev = rx->adapter->dmadev; + int done = 0; + struct tsnep_rx_entry *entry; + struct sk_buff *skb; + size_t len; + dma_addr_t dma; + int length; + bool enable = false; + int retval; + + while (likely(done < budget)) { + entry = &rx->entry[rx->read]; + if ((__le32_to_cpu(entry->desc_wb->properties) & + TSNEP_DESC_OWNER_COUNTER_MASK) != + (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) + break; + + /* descriptor properties shall be read first, because valid data + * is signaled there + */ + dma_rmb(); + + skb = entry->skb; + len = dma_unmap_len(entry, len); + dma = dma_unmap_addr(entry, dma); + + /* forward skb only if allocation is 
successful, otherwise + * skb is reused and frame dropped + */ + retval = tsnep_rx_alloc_and_map_skb(rx, entry); + if (!retval) { + dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE); + + length = __le32_to_cpu(entry->desc_wb->properties) & + TSNEP_DESC_LENGTH_MASK; + skb_put(skb, length - ETH_FCS_LEN); + if (rx->adapter->hwtstamp_config.rx_filter == + HWTSTAMP_FILTER_ALL) { + struct skb_shared_hwtstamps *hwtstamps = + skb_hwtstamps(skb); + struct tsnep_rx_inline *rx_inline = + (struct tsnep_rx_inline *)skb->data; + u64 timestamp = + __le64_to_cpu(rx_inline->timestamp); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(timestamp); + } + skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE); + skb->protocol = eth_type_trans(skb, + rx->adapter->netdev); + + rx->packets++; + rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE; + if (skb->pkt_type == PACKET_MULTICAST) + rx->multicast++; + + napi_gro_receive(napi, skb); + done++; + } else { + rx->dropped++; + } + + tsnep_rx_activate(rx, rx->read); + + enable = true; + + rx->read = (rx->read + 1) % TSNEP_RING_SIZE; + } + + if (enable) { + /* descriptor properties shall be valid before hardware is + * notified + */ + dma_wmb(); + + iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); + } + + return done; +} + +static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr, + struct tsnep_rx *rx) +{ + dma_addr_t dma; + int i; + int retval; + + memset(rx, 0, sizeof(*rx)); + rx->adapter = adapter; + rx->addr = addr; + + retval = tsnep_rx_ring_init(rx); + if (retval) + return retval; + + dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; + iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); + iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); + rx->owner_counter = 1; + rx->increment_owner_counter = TSNEP_RING_SIZE - 1; + + for (i = 0; i < TSNEP_RING_SIZE; i++) + tsnep_rx_activate(rx, i); + + /* descriptor properties shall be valid before hardware is notified */ + dma_wmb(); + + iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); + + return 0; +} + +static void tsnep_rx_close(struct tsnep_rx *rx) +{ + u32 val; + + iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); + readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, + ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000, + 1000000); + + tsnep_rx_ring_cleanup(rx); +} + +static int tsnep_poll(struct napi_struct *napi, int budget) +{ + struct tsnep_queue *queue = container_of(napi, struct tsnep_queue, + napi); + bool complete = true; + int done = 0; + + if (queue->tx) + complete = tsnep_tx_poll(queue->tx, budget); + + if (queue->rx) { + done = tsnep_rx_poll(queue->rx, napi, budget); + if (done >= budget) + complete = false; + } + + /* if all work not completed, return budget and keep polling */ + if (!complete) + return budget; + + if (likely(napi_complete_done(napi, done))) + tsnep_enable_irq(queue->adapter, queue->irq_mask); + + return min(done, budget - 1); +} + +static int tsnep_netdev_open(struct net_device *netdev) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + int i; + void __iomem *addr; + int tx_queue_index = 0; + int rx_queue_index = 0; + int retval; + + retval = tsnep_phy_open(adapter); + if (retval) + return retval; + + for (i = 0; i < adapter->num_queues; i++) { + adapter->queue[i].adapter = adapter; + if (adapter->queue[i].tx) { + addr = adapter->addr + TSNEP_QUEUE(tx_queue_index); + retval = tsnep_tx_open(adapter, addr, + adapter->queue[i].tx); + if (retval) + goto failed; + 
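+			/* Editor's illustration of the TSNEP_QUEUE() mapping
+			 * used above (values follow from tsnep_hw.h): queue 0
+			 * registers start at offset 0x0000, queue 1 at
+			 * TSNEP_MAC_SIZE = 0x4000, queue 2 at 0x5000, and so
+			 * on in TSNEP_QUEUE_SIZE (0x1000) steps.
+			 */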
tx_queue_index++; + } + if (adapter->queue[i].rx) { + addr = adapter->addr + TSNEP_QUEUE(rx_queue_index); + retval = tsnep_rx_open(adapter, addr, + adapter->queue[i].rx); + if (retval) + goto failed; + rx_queue_index++; + } + } + + retval = netif_set_real_num_tx_queues(adapter->netdev, + adapter->num_tx_queues); + if (retval) + goto failed; + retval = netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); + if (retval) + goto failed; + + for (i = 0; i < adapter->num_queues; i++) { + netif_napi_add(adapter->netdev, &adapter->queue[i].napi, + tsnep_poll, 64); + napi_enable(&adapter->queue[i].napi); + + tsnep_enable_irq(adapter, adapter->queue[i].irq_mask); + } + + return 0; + +failed: + for (i = 0; i < adapter->num_queues; i++) { + if (adapter->queue[i].rx) + tsnep_rx_close(adapter->queue[i].rx); + if (adapter->queue[i].tx) + tsnep_tx_close(adapter->queue[i].tx); + } + tsnep_phy_close(adapter); + return retval; +} + +static int tsnep_netdev_close(struct net_device *netdev) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + int i; + + for (i = 0; i < adapter->num_queues; i++) { + tsnep_disable_irq(adapter, adapter->queue[i].irq_mask); + + napi_disable(&adapter->queue[i].napi); + netif_napi_del(&adapter->queue[i].napi); + + if (adapter->queue[i].rx) + tsnep_rx_close(adapter->queue[i].rx); + if (adapter->queue[i].tx) + tsnep_tx_close(adapter->queue[i].tx); + } + + tsnep_phy_close(adapter); + + return 0; +} + +static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + u16 queue_mapping = skb_get_queue_mapping(skb); + + if (queue_mapping >= adapter->num_tx_queues) + queue_mapping = 0; + + return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); +} + +static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + if (!netif_running(netdev)) + return -EINVAL; + if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP) + return tsnep_ptp_ioctl(netdev, ifr, cmd); + return phy_mii_ioctl(netdev->phydev, ifr, cmd); +} + +static void tsnep_netdev_set_multicast(struct net_device *netdev) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + u16 rx_filter = 0; + + /* configured MAC address and broadcasts are never filtered */ + if (netdev->flags & IFF_PROMISC) { + rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS; + rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS; + } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { + rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS; + } + iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER); +} + +static void tsnep_netdev_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + u32 reg; + u32 val; + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + stats->tx_packets += adapter->tx[i].packets; + stats->tx_bytes += adapter->tx[i].bytes; + stats->tx_dropped += adapter->tx[i].dropped; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + stats->rx_packets += adapter->rx[i].packets; + stats->rx_bytes += adapter->rx[i].bytes; + stats->rx_dropped += adapter->rx[i].dropped; + stats->multicast += adapter->rx[i].multicast; + + reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + + TSNEP_RX_STATISTIC); + val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >> + TSNEP_RX_STATISTIC_NO_DESC_SHIFT; + stats->rx_dropped += val; + val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >> + TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT; + 
stats->rx_dropped += val; + val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >> + TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT; + stats->rx_errors += val; + stats->rx_fifo_errors += val; + val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >> + TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT; + stats->rx_errors += val; + stats->rx_frame_errors += val; + } + + reg = ioread32(adapter->addr + ECM_STAT); + val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT; + stats->rx_errors += val; + val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT; + stats->rx_errors += val; + stats->rx_crc_errors += val; + val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT; + stats->rx_errors += val; +} + +static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr) +{ + iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW); + iowrite16(*(u16 *)(addr + sizeof(u32)), + adapter->addr + TSNEP_MAC_ADDRESS_HIGH); + + ether_addr_copy(adapter->mac_address, addr); + netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n", + addr); +} + +static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + struct sockaddr *sock_addr = addr; + int retval; + + retval = eth_prepare_mac_addr_change(netdev, sock_addr); + if (retval) + return retval; + eth_hw_addr_set(netdev, sock_addr->sa_data); + tsnep_mac_set_address(adapter, sock_addr->sa_data); + + return 0; +} + +static const struct net_device_ops tsnep_netdev_ops = { + .ndo_open = tsnep_netdev_open, + .ndo_stop = tsnep_netdev_close, + .ndo_start_xmit = tsnep_netdev_xmit_frame, + .ndo_eth_ioctl = tsnep_netdev_ioctl, + .ndo_set_rx_mode = tsnep_netdev_set_multicast, + + .ndo_get_stats64 = tsnep_netdev_get_stats64, + .ndo_set_mac_address = tsnep_netdev_set_mac_address, + .ndo_setup_tc = tsnep_tc_setup, +}; + +static int tsnep_mac_init(struct tsnep_adapter *adapter) +{ + int retval; + + /* initialize RX filtering, at least configured MAC address and + * broadcast are not filtered + */ + iowrite16(0, adapter->addr + TSNEP_RX_FILTER); + + /* try to get MAC address in the following order: + * - device tree + * - valid MAC address already set + * - MAC address register if valid + * - random MAC address + */ + retval = of_get_mac_address(adapter->pdev->dev.of_node, + adapter->mac_address); + if (retval == -EPROBE_DEFER) + return retval; + if (retval && !is_valid_ether_addr(adapter->mac_address)) { + *(u32 *)adapter->mac_address = + ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW); + *(u16 *)(adapter->mac_address + sizeof(u32)) = + ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH); + if (!is_valid_ether_addr(adapter->mac_address)) + eth_random_addr(adapter->mac_address); + } + + tsnep_mac_set_address(adapter, adapter->mac_address); + eth_hw_addr_set(adapter->netdev, adapter->mac_address); + + return 0; +} + +static int tsnep_mdio_init(struct tsnep_adapter *adapter) +{ + struct device_node *np = adapter->pdev->dev.of_node; + int retval; + + if (np) { + np = of_get_child_by_name(np, "mdio"); + if (!np) + return 0; + + adapter->suppress_preamble = + of_property_read_bool(np, "suppress-preamble"); + } + + adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); + if (!adapter->mdiobus) { + retval = -ENOMEM; + + goto out; + } + + adapter->mdiobus->priv = (void *)adapter; + adapter->mdiobus->parent = &adapter->pdev->dev; + adapter->mdiobus->read = tsnep_mdiobus_read; + adapter->mdiobus->write = tsnep_mdiobus_write; + adapter->mdiobus->name = TSNEP "-mdiobus"; + 
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s", + adapter->pdev->name); + + /* do not scan broadcast address */ + adapter->mdiobus->phy_mask = 0x0000001; + + retval = of_mdiobus_register(adapter->mdiobus, np); + +out: + if (np) + of_node_put(np); + + return retval; +} + +static int tsnep_phy_init(struct tsnep_adapter *adapter) +{ + struct device_node *phy_node; + int retval; + + retval = of_get_phy_mode(adapter->pdev->dev.of_node, + &adapter->phy_mode); + if (retval) + adapter->phy_mode = PHY_INTERFACE_MODE_GMII; + + phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle", + 0); + adapter->phydev = of_phy_find_device(phy_node); + of_node_put(phy_node); + if (!adapter->phydev && adapter->mdiobus) + adapter->phydev = phy_find_first(adapter->mdiobus); + if (!adapter->phydev) + return -EIO; + + return 0; +} + +static int tsnep_probe(struct platform_device *pdev) +{ + struct tsnep_adapter *adapter; + struct net_device *netdev; + struct resource *io; + u32 type; + int revision; + int version; + int retval; + + netdev = devm_alloc_etherdev_mqs(&pdev->dev, + sizeof(struct tsnep_adapter), + TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES); + if (!netdev) + return -ENODEV; + SET_NETDEV_DEV(netdev, &pdev->dev); + adapter = netdev_priv(netdev); + platform_set_drvdata(pdev, adapter); + adapter->pdev = pdev; + adapter->dmadev = &pdev->dev; + adapter->netdev = netdev; + adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; + + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = TSNEP_MAX_FRAME_SIZE; + + mutex_init(&adapter->gate_control_lock); + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + adapter->addr = devm_ioremap_resource(&pdev->dev, io); + if (IS_ERR(adapter->addr)) + return PTR_ERR(adapter->addr); + adapter->irq = platform_get_irq(pdev, 0); + netdev->mem_start = io->start; + netdev->mem_end = io->end; + netdev->irq = adapter->irq; + + type = ioread32(adapter->addr + ECM_TYPE); + revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT; + version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT; + adapter->gate_control = type & ECM_GATE_CONTROL; + + adapter->num_tx_queues = TSNEP_QUEUES; + adapter->num_rx_queues = TSNEP_QUEUES; + adapter->num_queues = TSNEP_QUEUES; + adapter->queue[0].tx = &adapter->tx[0]; + adapter->queue[0].rx = &adapter->rx[0]; + adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0; + + tsnep_disable_irq(adapter, ECM_INT_ALL); + retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq, + 0, TSNEP, adapter); + if (retval != 0) { + dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n", + adapter->irq); + return retval; + } + tsnep_enable_irq(adapter, ECM_INT_LINK); + + retval = tsnep_mac_init(adapter); + if (retval) + goto mac_init_failed; + + retval = tsnep_mdio_init(adapter); + if (retval) + goto mdio_init_failed; + + retval = tsnep_phy_init(adapter); + if (retval) + goto phy_init_failed; + + retval = tsnep_ptp_init(adapter); + if (retval) + goto ptp_init_failed; + + retval = tsnep_tc_init(adapter); + if (retval) + goto tc_init_failed; + + netdev->netdev_ops = &tsnep_netdev_ops; + netdev->ethtool_ops = &tsnep_ethtool_ops; + netdev->features = NETIF_F_SG; + netdev->hw_features = netdev->features; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + retval = register_netdev(netdev); + if (retval) + goto register_failed; + + dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version, + revision); + if 
(adapter->gate_control) + dev_info(&adapter->pdev->dev, "gate control detected\n"); + + return 0; + +register_failed: + tsnep_tc_cleanup(adapter); +tc_init_failed: + tsnep_ptp_cleanup(adapter); +ptp_init_failed: +phy_init_failed: + if (adapter->mdiobus) + mdiobus_unregister(adapter->mdiobus); +mdio_init_failed: +mac_init_failed: + tsnep_disable_irq(adapter, ECM_INT_ALL); + return retval; +} + +static int tsnep_remove(struct platform_device *pdev) +{ + struct tsnep_adapter *adapter = platform_get_drvdata(pdev); + + unregister_netdev(adapter->netdev); + + tsnep_tc_cleanup(adapter); + + tsnep_ptp_cleanup(adapter); + + if (adapter->mdiobus) + mdiobus_unregister(adapter->mdiobus); + + tsnep_disable_irq(adapter, ECM_INT_ALL); + + return 0; +} + +static const struct of_device_id tsnep_of_match[] = { + { .compatible = "engleder,tsnep", }, +{ }, +}; +MODULE_DEVICE_TABLE(of, tsnep_of_match); + +static struct platform_driver tsnep_driver = { + .driver = { + .name = TSNEP, + .of_match_table = of_match_ptr(tsnep_of_match), + }, + .probe = tsnep_probe, + .remove = tsnep_remove, +}; +module_platform_driver(tsnep_driver); + +MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>"); +MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c new file mode 100644 index 000000000000..eaad453d487e --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_ptp.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +#include "tsnep.h" + +void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time) +{ + u32 high_before; + u32 low; + u32 high; + + /* read high dword twice to detect overrun */ + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); + do { + low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); + high_before = high; + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); + } while (high != high_before); + *time = (((u64)high) << 32) | ((u64)low); +} + +int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + + if (!ifr) + return -EINVAL; + + if (cmd == SIOCSHWTSTAMP) { + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + memcpy(&adapter->hwtstamp_config, &config, + sizeof(adapter->hwtstamp_config)); + } + + if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, + sizeof(adapter->hwtstamp_config))) + return -EFAULT; + + return 0; +} + +static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct tsnep_adapter *adapter = 
container_of(ptp, struct tsnep_adapter, + ptp_clock_info); + bool negative = false; + u64 rate_offset; + + if (scaled_ppm < 0) { + scaled_ppm = -scaled_ppm; + negative = true; + } + + /* convert from 16 bit to 32 bit binary fractional, divide by 1000000 to + * eliminate ppm, multiply by 8 to compensate for the 8ns clock cycle time, + * simplify calculation because 15625 * 8 = 1000000 / 8 + */ + rate_offset = scaled_ppm; + rate_offset <<= 16 - 3; + rate_offset = div_u64(rate_offset, 15625); + + rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK; + if (negative) + rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN; + iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE); + + return 0; +} + +static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, + ptp_clock_info); + u64 system_time; + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_lock, flags); + + tsnep_get_system_time(adapter, &system_time); + + system_time += delta; + + /* high dword is buffered in hardware and synchronously written to + * system time when low dword is written + */ + iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH); + iowrite32(system_time & 0xFFFFFFFF, + adapter->addr + ECM_SYSTEM_TIME_LOW); + + spin_unlock_irqrestore(&adapter->ptp_lock, flags); + + return 0; +} + +static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, + ptp_clock_info); + u32 high_before; + u32 low; + u32 high; + u64 system_time; + + /* read high dword twice to detect overrun */ + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); + do { + ptp_read_system_prets(sts); + low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); + ptp_read_system_postts(sts); + high_before = high; + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); + } while (high != high_before); + system_time = (((u64)high) << 32) | ((u64)low); + + *ts = ns_to_timespec64(system_time); + + return 0; +} + +static int tsnep_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, + ptp_clock_info); + u64 system_time = timespec64_to_ns(ts); + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_lock, flags); + + /* high dword is buffered in hardware and synchronously written to + * system time when low dword is written + */ + iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH); + iowrite32(system_time & 0xFFFFFFFF, + adapter->addr + ECM_SYSTEM_TIME_LOW); + + spin_unlock_irqrestore(&adapter->ptp_lock, flags); + + return 0; +} + +int tsnep_ptp_init(struct tsnep_adapter *adapter) +{ + int retval = 0; + + adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + + snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP); + adapter->ptp_clock_info.owner = THIS_MODULE; + /* at most 2^-1ns adjustment every clock cycle for 8ns clock cycle time; + * stay slightly below that because only bits below 2^-1ns are supported + */ + adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1); + adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine; + adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime; + adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64; + adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64; + + spin_lock_init(&adapter->ptp_lock); + + adapter->ptp_clock =
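+ /* ptp_clock_register() returns an ERR_PTR() on failure but NULL when PTP clock support is not built into the kernel; NULL is treated below as success without a PHC, which is why the "PHC added" message is printed only in the else-if branch */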
ptp_clock_register(&adapter->ptp_clock_info, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + netdev_err(adapter->netdev, "ptp_clock_register failed\n"); + + retval = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + } else if (adapter->ptp_clock) { + netdev_info(adapter->netdev, "PHC added\n"); + } + + return retval; +} + +void tsnep_ptp_cleanup(struct tsnep_adapter *adapter) +{ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + netdev_info(adapter->netdev, "PHC removed\n"); + } +} diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c new file mode 100644 index 000000000000..1581d6b22232 --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_selftests.c @@ -0,0 +1,811 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +#include "tsnep.h" + +#include <net/pkt_sched.h> + +enum tsnep_test { + TSNEP_TEST_ENABLE = 0, + TSNEP_TEST_TAPRIO, + TSNEP_TEST_TAPRIO_CHANGE, + TSNEP_TEST_TAPRIO_EXTENSION, +}; + +static const char tsnep_test_strings[][ETH_GSTRING_LEN] = { + "Enable timeout (offline)", + "TAPRIO (offline)", + "TAPRIO change (offline)", + "TAPRIO extension (offline)", +}; + +#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN) + +static bool enable_gc_timeout(struct tsnep_adapter *adapter) +{ + iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC); + if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE)) + return false; + + return true; +} + +static bool gc_timeout_signaled(struct tsnep_adapter *adapter) +{ + if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL) + return true; + + return false; +} + +static bool ack_gc_timeout(struct tsnep_adapter *adapter) +{ + iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC); + if (ioread32(adapter->addr + TSNEP_GC) & + (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL)) + return false; + return true; +} + +static bool enable_gc(struct tsnep_adapter *adapter, bool a) +{ + u8 enable; + u8 active; + + if (a) { + enable = TSNEP_GC_ENABLE_A; + active = TSNEP_GC_ACTIVE_A; + } else { + enable = TSNEP_GC_ENABLE_B; + active = TSNEP_GC_ACTIVE_B; + } + + iowrite8(enable, adapter->addr + TSNEP_GC); + if (!(ioread32(adapter->addr + TSNEP_GC) & active)) + return false; + + return true; +} + +static bool disable_gc(struct tsnep_adapter *adapter) +{ + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); + if (ioread32(adapter->addr + TSNEP_GC) & + (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B)) + return false; + + return true; +} + +static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay) +{ + u64 before, after; + u32 time; + bool enabled; + + if (!disable_gc(adapter)) + return false; + + before = ktime_get_ns(); + + if (!enable_gc_timeout(adapter)) + return false; + + /* for a start time after the timeout, the timeout guarantees that + * enable is blocked if it comes too late + */ + time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); + time += TSNEP_GC_TIMEOUT; + iowrite32(time, adapter->addr + TSNEP_GC_TIME); + + ndelay(delay); + + enabled = enable_gc(adapter, a); + after = ktime_get_ns(); + + if (delay > TSNEP_GC_TIMEOUT) { + /* timeout must have blocked enable */ + if (enabled) + return false; + } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) { + /* timeout must not have blocked enable */ + if (!enabled) + return false; + } + + if (enabled) { + if (gc_timeout_signaled(adapter)) + return false; + } else { + if
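+ /* expected hardware contract, inferred from the register bits used in this file: a blocked enable implies the timeout fired, so the signal flag must be set here and acknowledged before the gate control logic is re-armed */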
(!gc_timeout_signaled(adapter)) + return false; + if (!ack_gc_timeout(adapter)) + return false; + } + + if (!disable_gc(adapter)) + return false; + + return true; +} + +static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter) +{ + int i; + + iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0); + iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4); + + for (i = 0; i < 200000; i += 100) { + if (!gc_delayed_enable(adapter, true, i)) + return false; + } + + iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0); + iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4); + + for (i = 0; i < 200000; i += 100) { + if (!gc_delayed_enable(adapter, false, i)) + return false; + } + + return true; +} + +static void delay_base_time(struct tsnep_adapter *adapter, + struct tc_taprio_qopt_offload *qopt, s64 ms) +{ + u64 system_time; + u64 base_time = ktime_to_ns(qopt->base_time); + u64 n; + + tsnep_get_system_time(adapter, &system_time); + system_time += ms * 1000000; + n = div64_u64(system_time - base_time, qopt->cycle_time); + + qopt->base_time = ktime_add_ns(qopt->base_time, + (n + 1) * qopt->cycle_time); +} + +static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time, + u64 *system_time) +{ + u32 time_high_before; + u32 time_low; + u32 time_high; + u32 gc_time_before; + + time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); + *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME); + do { + time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); + *gc = ioread32(adapter->addr + TSNEP_GC); + + gc_time_before = *gc_time; + *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME); + time_high_before = time_high; + time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); + } while ((time_high != time_high_before) || + (*gc_time != gc_time_before)); + + *system_time = (((u64)time_high) << 32) | ((u64)time_low); +} + +static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next) +{ + u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time); + u64 cycle_start = gcl->base_time + gcl->cycle_time * n; + int i; + + *next = cycle_start; + for (i = 0; i < gcl->count; i++) { + *next += gcl->operation[i].interval; + if (*next > system_time) + break; + } + + return i; +} + +static bool check_gate(struct tsnep_adapter *adapter) +{ + u32 gc_time; + u32 gc; + u64 system_time; + struct tsnep_gcl *curr; + struct tsnep_gcl *prev; + u64 next_time; + u8 gate_open; + u8 next_gate_open; + + get_gate_state(adapter, &gc, &gc_time, &system_time); + + if (gc & TSNEP_GC_ACTIVE_A) { + curr = &adapter->gcl[0]; + prev = &adapter->gcl[1]; + } else if (gc & TSNEP_GC_ACTIVE_B) { + curr = &adapter->gcl[1]; + prev = &adapter->gcl[0]; + } else { + return false; + } + if (curr->start_time <= system_time) { + /* GCL is already active */ + int index; + + index = get_operation(curr, system_time, &next_time); + gate_open = curr->operation[index].properties & TSNEP_GCL_MASK; + if (index == curr->count - 1) + index = 0; + else + index++; + next_gate_open = + curr->operation[index].properties & TSNEP_GCL_MASK; + } else if (curr->change) { + /* operation of previous GCL is active */ + int index; + u64 start_before; + u64 n; + + index = get_operation(prev, system_time, &next_time); + next_time = curr->start_time; + start_before = prev->base_time; + n = div64_u64(curr->start_time - start_before, + prev->cycle_time); + start_before += n * prev->cycle_time; + if (curr->start_time == start_before) + start_before -= prev->cycle_time; + if (((start_before + prev->cycle_time_extension) >= + curr->start_time) 
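+ /* 802.1Qbv-style cycle time extension: when the new list starts within cycle_time_extension after the last full cycle of the previous list, the previous list's final operation is stretched instead of truncated, so the expected open mask is that of its last entry */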
&& + (curr->start_time - prev->cycle_time_extension <= + system_time)) { + /* extend */ + index = prev->count - 1; + } + gate_open = prev->operation[index].properties & TSNEP_GCL_MASK; + next_gate_open = + curr->operation[0].properties & TSNEP_GCL_MASK; + } else { + /* GCL is waiting for start */ + next_time = curr->start_time; + gate_open = 0xFF; + next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK; + } + + if (gc_time != (next_time & 0xFFFFFFFF)) { + dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n", + gc_time, next_time); + return false; + } + if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) { + dev_err(&adapter->pdev->dev, + "gate control open 0x%02x!=0x%02x\n", + ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT), + gate_open); + return false; + } + if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) != + next_gate_open) { + dev_err(&adapter->pdev->dev, + "gate control next open 0x%02x!=0x%02x\n", + ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT), + next_gate_open); + return false; + } + + return true; +} + +static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms) +{ + ktime_t start = ktime_get(); + + do { + if (!check_gate(adapter)) + return false; + } while (ktime_ms_delta(ktime_get(), start) < ms); + + return true; +} + +static bool enable_check_taprio(struct tsnep_adapter *adapter, + struct tc_taprio_qopt_offload *qopt, s64 ms) +{ + int retval; + + retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt); + if (retval) + return false; + + if (!check_gate_duration(adapter, ms)) + return false; + + return true; +} + +static bool disable_taprio(struct tsnep_adapter *adapter) +{ + struct tc_taprio_qopt_offload qopt; + int retval; + + memset(&qopt, 0, sizeof(qopt)); + qopt.enable = 0; + retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt); + if (retval) + return false; + + return true; +} + +static bool run_taprio(struct tsnep_adapter *adapter, + struct tc_taprio_qopt_offload *qopt, s64 ms) +{ + if (!enable_check_taprio(adapter, qopt, ms)) + return false; + + if (!disable_taprio(adapter)) + return false; + + return true; +} + +static bool tsnep_test_taprio(struct tsnep_adapter *adapter) +{ + struct tc_taprio_qopt_offload *qopt; + int i; + + qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL); + if (!qopt) + return false; + for (i = 0; i < 255; i++) + qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; + + qopt->enable = 1; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 1500000; + qopt->cycle_time_extension = 0; + qopt->entries[0].gate_mask = 0x02; + qopt->entries[0].interval = 200000; + qopt->entries[1].gate_mask = 0x03; + qopt->entries[1].interval = 800000; + qopt->entries[2].gate_mask = 0x07; + qopt->entries[2].interval = 240000; + qopt->entries[3].gate_mask = 0x01; + qopt->entries[3].interval = 80000; + qopt->entries[4].gate_mask = 0x04; + qopt->entries[4].interval = 70000; + qopt->entries[5].gate_mask = 0x06; + qopt->entries[5].interval = 60000; + qopt->entries[6].gate_mask = 0x0F; + qopt->entries[6].interval = 50000; + qopt->num_entries = 7; + if (!run_taprio(adapter, qopt, 100)) + goto failed; + + qopt->enable = 1; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 411854; + qopt->cycle_time_extension = 0; + qopt->entries[0].gate_mask = 0x17; + qopt->entries[0].interval = 23842; + qopt->entries[1].gate_mask = 0x16; + qopt->entries[1].interval = 13482; + qopt->entries[2].gate_mask = 0x15; + qopt->entries[2].interval = 49428; + qopt->entries[3].gate_mask = 
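+ /* the intervals of this schedule sum exactly to cycle_time, as tsnep_validate_gcl() in tsnep_tc.c requires: 23842 + 13482 + 49428 + 38189 + 92321 + 71239 + 69932 + 53421 = 411854 */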
0x14; + qopt->entries[3].interval = 38189; + qopt->entries[4].gate_mask = 0x13; + qopt->entries[4].interval = 92321; + qopt->entries[5].gate_mask = 0x12; + qopt->entries[5].interval = 71239; + qopt->entries[6].gate_mask = 0x11; + qopt->entries[6].interval = 69932; + qopt->entries[7].gate_mask = 0x10; + qopt->entries[7].interval = 53421; + qopt->num_entries = 8; + if (!run_taprio(adapter, qopt, 100)) + goto failed; + + qopt->enable = 1; + qopt->base_time = ktime_set(0, 0); + delay_base_time(adapter, qopt, 12); + qopt->cycle_time = 125000; + qopt->cycle_time_extension = 0; + qopt->entries[0].gate_mask = 0x27; + qopt->entries[0].interval = 15000; + qopt->entries[1].gate_mask = 0x26; + qopt->entries[1].interval = 15000; + qopt->entries[2].gate_mask = 0x25; + qopt->entries[2].interval = 12500; + qopt->entries[3].gate_mask = 0x24; + qopt->entries[3].interval = 17500; + qopt->entries[4].gate_mask = 0x23; + qopt->entries[4].interval = 10000; + qopt->entries[5].gate_mask = 0x22; + qopt->entries[5].interval = 11000; + qopt->entries[6].gate_mask = 0x21; + qopt->entries[6].interval = 9000; + qopt->entries[7].gate_mask = 0x20; + qopt->entries[7].interval = 10000; + qopt->entries[8].gate_mask = 0x20; + qopt->entries[8].interval = 12500; + qopt->entries[9].gate_mask = 0x20; + qopt->entries[9].interval = 12500; + qopt->num_entries = 10; + if (!run_taprio(adapter, qopt, 100)) + goto failed; + + kfree(qopt); + + return true; + +failed: + disable_taprio(adapter); + kfree(qopt); + + return false; +} + +static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter) +{ + struct tc_taprio_qopt_offload *qopt; + int i; + + qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL); + if (!qopt) + return false; + for (i = 0; i < 255; i++) + qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; + + qopt->enable = 1; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 100000; + qopt->cycle_time_extension = 0; + qopt->entries[0].gate_mask = 0x30; + qopt->entries[0].interval = 20000; + qopt->entries[1].gate_mask = 0x31; + qopt->entries[1].interval = 80000; + qopt->num_entries = 2; + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to identical */ + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + delay_base_time(adapter, qopt, 17); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to same cycle time */ + qopt->base_time = ktime_set(0, 0); + qopt->entries[0].gate_mask = 0x42; + qopt->entries[1].gate_mask = 0x43; + delay_base_time(adapter, qopt, 2); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->entries[0].gate_mask = 0x54; + qopt->entries[0].interval = 33333; + qopt->entries[1].gate_mask = 0x55; + qopt->entries[1].interval = 66667; + delay_base_time(adapter, qopt, 23); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->entries[0].gate_mask = 0x66; + qopt->entries[0].interval = 50000; + qopt->entries[1].gate_mask = 0x67; + qopt->entries[1].interval = 25000; + qopt->entries[2].gate_mask = 0x68; + qopt->entries[2].interval = 25000; + qopt->num_entries = 3; + delay_base_time(adapter, qopt, 11); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to multiple of cycle time */ + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 200000; + qopt->entries[0].gate_mask = 0x79; + qopt->entries[0].interval = 50000; + qopt->entries[1].gate_mask = 0x7A; + qopt->entries[1].interval = 150000; + qopt->num_entries = 
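+ /* 50000 + 150000 = 200000, twice the previous 100000 ns cycle; presumably this exercises the exact-boundary path in tsnep_cut_gcl(), where a change landing on an old cycle boundary needs no inserted operation */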
2; + delay_base_time(adapter, qopt, 11); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 1000000; + qopt->entries[0].gate_mask = 0x7B; + qopt->entries[0].interval = 125000; + qopt->entries[1].gate_mask = 0x7C; + qopt->entries[1].interval = 250000; + qopt->entries[2].gate_mask = 0x7D; + qopt->entries[2].interval = 375000; + qopt->entries[3].gate_mask = 0x7E; + qopt->entries[3].interval = 250000; + qopt->num_entries = 4; + delay_base_time(adapter, qopt, 3); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to shorter cycle time */ + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 333333; + qopt->entries[0].gate_mask = 0x8F; + qopt->entries[0].interval = 166666; + qopt->entries[1].gate_mask = 0x80; + qopt->entries[1].interval = 166667; + qopt->num_entries = 2; + delay_base_time(adapter, qopt, 11); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 62500; + qopt->entries[0].gate_mask = 0x81; + qopt->entries[0].interval = 31250; + qopt->entries[1].gate_mask = 0x82; + qopt->entries[1].interval = 15625; + qopt->entries[2].gate_mask = 0x83; + qopt->entries[2].interval = 15625; + qopt->num_entries = 3; + delay_base_time(adapter, qopt, 1); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to longer cycle time */ + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 400000; + qopt->entries[0].gate_mask = 0x84; + qopt->entries[0].interval = 100000; + qopt->entries[1].gate_mask = 0x85; + qopt->entries[1].interval = 100000; + qopt->entries[2].gate_mask = 0x86; + qopt->entries[2].interval = 100000; + qopt->entries[3].gate_mask = 0x87; + qopt->entries[3].interval = 100000; + qopt->num_entries = 4; + delay_base_time(adapter, qopt, 7); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 1700000; + qopt->entries[0].gate_mask = 0x88; + qopt->entries[0].interval = 200000; + qopt->entries[1].gate_mask = 0x89; + qopt->entries[1].interval = 300000; + qopt->entries[2].gate_mask = 0x8A; + qopt->entries[2].interval = 600000; + qopt->entries[3].gate_mask = 0x8B; + qopt->entries[3].interval = 100000; + qopt->entries[4].gate_mask = 0x8C; + qopt->entries[4].interval = 500000; + qopt->num_entries = 5; + delay_base_time(adapter, qopt, 6); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + if (!disable_taprio(adapter)) + goto failed; + + kfree(qopt); + + return true; + +failed: + disable_taprio(adapter); + kfree(qopt); + + return false; +} + +static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter) +{ + struct tc_taprio_qopt_offload *qopt; + int i; + + qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL); + if (!qopt) + return false; + for (i = 0; i < 255; i++) + qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; + + qopt->enable = 1; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 100000; + qopt->cycle_time_extension = 50000; + qopt->entries[0].gate_mask = 0x90; + qopt->entries[0].interval = 20000; + qopt->entries[1].gate_mask = 0x91; + qopt->entries[1].interval = 80000; + qopt->num_entries = 2; + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to different phase */ + qopt->base_time = ktime_set(0, 50000); + qopt->entries[0].gate_mask = 0x92; + qopt->entries[0].interval = 33000; + qopt->entries[1].gate_mask = 0x93; + qopt->entries[1].interval = 67000; + qopt->num_entries = 2; 
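+ /* delay_base_time() (defined earlier in this file) advances base_time by whole cycles until it lies at least the given number of milliseconds past the current hardware time, so each reconfiguration below happens while the old schedule is still running */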
+ delay_base_time(adapter, qopt, 2); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to different phase and longer cycle time */ + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 1000000; + qopt->cycle_time_extension = 700000; + qopt->entries[0].gate_mask = 0x94; + qopt->entries[0].interval = 400000; + qopt->entries[1].gate_mask = 0x95; + qopt->entries[1].interval = 600000; + qopt->num_entries = 2; + delay_base_time(adapter, qopt, 7); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 700000); + qopt->cycle_time = 2000000; + qopt->cycle_time_extension = 1900000; + qopt->entries[0].gate_mask = 0x96; + qopt->entries[0].interval = 400000; + qopt->entries[1].gate_mask = 0x97; + qopt->entries[1].interval = 1600000; + qopt->num_entries = 2; + delay_base_time(adapter, qopt, 9); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to different phase and shorter cycle time */ + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 1500000; + qopt->cycle_time_extension = 700000; + qopt->entries[0].gate_mask = 0x98; + qopt->entries[0].interval = 400000; + qopt->entries[1].gate_mask = 0x99; + qopt->entries[1].interval = 600000; + qopt->entries[2].gate_mask = 0x9A; + qopt->entries[2].interval = 500000; + qopt->num_entries = 3; + delay_base_time(adapter, qopt, 3); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 100000); + qopt->cycle_time = 500000; + qopt->cycle_time_extension = 300000; + qopt->entries[0].gate_mask = 0x9B; + qopt->entries[0].interval = 150000; + qopt->entries[1].gate_mask = 0x9C; + qopt->entries[1].interval = 350000; + qopt->num_entries = 2; + delay_base_time(adapter, qopt, 9); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + /* change to different cycle time */ + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 1000000; + qopt->cycle_time_extension = 700000; + qopt->entries[0].gate_mask = 0xAD; + qopt->entries[0].interval = 400000; + qopt->entries[1].gate_mask = 0xAE; + qopt->entries[1].interval = 300000; + qopt->entries[2].gate_mask = 0xAF; + qopt->entries[2].interval = 300000; + qopt->num_entries = 3; + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 400000; + qopt->cycle_time_extension = 100000; + qopt->entries[0].gate_mask = 0xA0; + qopt->entries[0].interval = 200000; + qopt->entries[1].gate_mask = 0xA1; + qopt->entries[1].interval = 200000; + qopt->num_entries = 2; + delay_base_time(adapter, qopt, 19); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 500000; + qopt->cycle_time_extension = 499999; + qopt->entries[0].gate_mask = 0xB2; + qopt->entries[0].interval = 100000; + qopt->entries[1].gate_mask = 0xB3; + qopt->entries[1].interval = 100000; + qopt->entries[2].gate_mask = 0xB4; + qopt->entries[2].interval = 100000; + qopt->entries[3].gate_mask = 0xB5; + qopt->entries[3].interval = 200000; + qopt->num_entries = 4; + delay_base_time(adapter, qopt, 19); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + qopt->base_time = ktime_set(0, 0); + qopt->cycle_time = 6000000; + qopt->cycle_time_extension = 5999999; + qopt->entries[0].gate_mask = 0xC6; + qopt->entries[0].interval = 1000000; + qopt->entries[1].gate_mask = 0xC7; + qopt->entries[1].interval = 1000000; + qopt->entries[2].gate_mask = 0xC8; + qopt->entries[2].interval = 1000000; + 
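+ /* cycle_time_extension = cycle_time - 1 is the largest value tsnep_validate_gcl() accepts (it rejects extension >= cycle_time), so this schedule exercises the maximum extension window */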
qopt->entries[3].gate_mask = 0xC9; + qopt->entries[3].interval = 1500000; + qopt->entries[4].gate_mask = 0xCA; + qopt->entries[4].interval = 1500000; + qopt->num_entries = 5; + delay_base_time(adapter, qopt, 1); + if (!enable_check_taprio(adapter, qopt, 100)) + goto failed; + + if (!disable_taprio(adapter)) + goto failed; + + kfree(qopt); + + return true; + +failed: + disable_taprio(adapter); + kfree(qopt); + + return false; +} + +int tsnep_ethtool_get_test_count(void) +{ + return TSNEP_TEST_COUNT; +} + +void tsnep_ethtool_get_test_strings(u8 *data) +{ + memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings)); +} + +void tsnep_ethtool_self_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + eth_test->len = TSNEP_TEST_COUNT; + + if (eth_test->flags != ETH_TEST_FL_OFFLINE) { + /* no tests are done online */ + data[TSNEP_TEST_ENABLE] = 0; + data[TSNEP_TEST_TAPRIO] = 0; + data[TSNEP_TEST_TAPRIO_CHANGE] = 0; + data[TSNEP_TEST_TAPRIO_EXTENSION] = 0; + + return; + } + + if (tsnep_test_gc_enable(adapter)) { + data[TSNEP_TEST_ENABLE] = 0; + } else { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[TSNEP_TEST_ENABLE] = 1; + } + + if (tsnep_test_taprio(adapter)) { + data[TSNEP_TEST_TAPRIO] = 0; + } else { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[TSNEP_TEST_TAPRIO] = 1; + } + + if (tsnep_test_taprio_change(adapter)) { + data[TSNEP_TEST_TAPRIO_CHANGE] = 0; + } else { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[TSNEP_TEST_TAPRIO_CHANGE] = 1; + } + + if (tsnep_test_taprio_extension(adapter)) { + data[TSNEP_TEST_TAPRIO_EXTENSION] = 0; + } else { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[TSNEP_TEST_TAPRIO_EXTENSION] = 1; + } +} diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c new file mode 100644 index 000000000000..c4c6e1357317 --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_tc.c @@ -0,0 +1,443 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ + +#include "tsnep.h" + +#include <net/pkt_sched.h> + +/* save one operation at the end for additional operation at list change */ +#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1) + +static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt) +{ + int i; + u64 cycle_time; + + if (!qopt->cycle_time) + return -ERANGE; + if (qopt->num_entries > TSNEP_MAX_GCL_NUM) + return -EINVAL; + cycle_time = 0; + for (i = 0; i < qopt->num_entries; i++) { + if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES) + return -EINVAL; + if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK) + return -EINVAL; + if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL) + return -EINVAL; + cycle_time += qopt->entries[i].interval; + } + if (qopt->cycle_time != cycle_time) + return -EINVAL; + if (qopt->cycle_time_extension >= qopt->cycle_time) + return -EINVAL; + + return 0; +} + +static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index, + u32 properties, u32 interval, bool flush) +{ + void __iomem *addr = gcl->addr + + sizeof(struct tsnep_gcl_operation) * index; + + gcl->operation[index].properties = properties; + gcl->operation[index].interval = interval; + + iowrite32(properties, addr); + iowrite32(interval, addr + sizeof(u32)); + + if (flush) { + /* flush write with read access */ + ioread32(addr); + } +} + +static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index) +{ + u64 duration; + int count; + + /* change needs to be triggered one or 
two operations before start of + * new gate control list + * - change is triggered at start of operation (minimum one operation) + * - operation with adjusted interval is inserted on demand to exactly + * meet the start of the new gate control list (optional) + * + * additionally properties are read directly after start of previous + * operation + * + * therefore, three operations need to be considered for the limit + */ + duration = 0; + count = 3; + while (count) { + duration += gcl->operation[index].interval; + + index--; + if (index < 0) + index = gcl->count - 1; + + count--; + } + + return duration; +} + +static void tsnep_write_gcl(struct tsnep_gcl *gcl, + struct tc_taprio_qopt_offload *qopt) +{ + int i; + u32 properties; + u64 extend; + u64 cut; + + gcl->base_time = ktime_to_ns(qopt->base_time); + gcl->cycle_time = qopt->cycle_time; + gcl->cycle_time_extension = qopt->cycle_time_extension; + + for (i = 0; i < qopt->num_entries; i++) { + properties = qopt->entries[i].gate_mask; + if (i == (qopt->num_entries - 1)) + properties |= TSNEP_GCL_LAST; + + tsnep_write_gcl_operation(gcl, i, properties, + qopt->entries[i].interval, true); + } + gcl->count = qopt->num_entries; + + /* calculate change limit; i.e., the time needed between enable and + * start of new gate control list + */ + + /* case 1: extend cycle time for change + * - change duration of last operation + * - cycle time extension + */ + extend = tsnep_change_duration(gcl, gcl->count - 1); + extend += gcl->cycle_time_extension; + + /* case 2: cut cycle time for change + * - maximum change duration + */ + cut = 0; + for (i = 0; i < gcl->count; i++) + cut = max(cut, tsnep_change_duration(gcl, i)); + + /* use maximum, because the actual case (extend or cut) can be + * determined only after limit is known (chicken-and-egg problem) + */ + gcl->change_limit = max(extend, cut); +} + +static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit) +{ + u64 start = gcl->base_time; + u64 n; + + if (start <= limit) { + n = div64_u64(limit - start, gcl->cycle_time); + start += (n + 1) * gcl->cycle_time; + } + + return start; +} + +static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit) +{ + u64 start = gcl->base_time; + u64 n; + + n = div64_u64(limit - start, gcl->cycle_time); + start += n * gcl->cycle_time; + if (start == limit) + start -= gcl->cycle_time; + + return start; +} + +static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change, + bool insert) +{ + /* previous operation triggers change and properties are evaluated at + * start of operation + */ + if (index == 0) + index = gcl->count - 1; + else + index = index - 1; + change -= gcl->operation[index].interval; + + /* optionally change to new list with additional operation in between */ + if (insert) { + void __iomem *addr = gcl->addr + + sizeof(struct tsnep_gcl_operation) * index; + + gcl->operation[index].properties |= TSNEP_GCL_INSERT; + iowrite32(gcl->operation[index].properties, addr); + } + + return change; +} + +static void tsnep_clean_gcl(struct tsnep_gcl *gcl) +{ + int i; + u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK; + void __iomem *addr; + + /* search for insert operation and reset properties */ + for (i = 0; i < gcl->count; i++) { + if (gcl->operation[i].properties & ~mask) { + addr = gcl->addr + + sizeof(struct tsnep_gcl_operation) * i; + + gcl->operation[i].properties &= mask; + iowrite32(gcl->operation[i].properties, addr); + + break; + } + } +} + +static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref, + u64 change, u32
interval) +{ + u32 properties; + + properties = gcl->operation[ref].properties & TSNEP_GCL_MASK; + /* change to new list directly after inserted operation */ + properties |= TSNEP_GCL_CHANGE; + + /* last operation of list is reserved to insert operation */ + tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties, + interval, false); + + return tsnep_set_gcl_change(gcl, ref, change, true); +} + +static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension) +{ + int ref = gcl->count - 1; + u32 interval = gcl->operation[ref].interval + extension; + + start -= gcl->operation[ref].interval; + + return tsnep_insert_gcl_operation(gcl, ref, start, interval); +} + +static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time) +{ + u64 sum = 0; + int i; + + /* find the operation which shall be cut */ + for (i = 0; i < gcl->count; i++) { + u64 sum_tmp = sum + gcl->operation[i].interval; + u64 interval; + + /* sum up operations as long as cycle time is not exceeded */ + if (sum_tmp > cycle_time) + break; + + /* remaining interval must be big enough for hardware */ + interval = cycle_time - sum_tmp; + if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL) + break; + + sum = sum_tmp; + } + if (sum == cycle_time) { + /* no need to cut operation itself or whole cycle + * => change exactly at operation + */ + return tsnep_set_gcl_change(gcl, i, start + sum, false); + } + return tsnep_insert_gcl_operation(gcl, i, start + sum, + cycle_time - sum); +} + +static int tsnep_enable_gcl(struct tsnep_adapter *adapter, + struct tsnep_gcl *gcl, struct tsnep_gcl *curr) +{ + u64 system_time; + u64 timeout; + u64 limit; + + /* estimate timeout limit after timeout enable; the actual timeout limit + * in hardware will be earlier than the estimate, so we are on the safe side + */ + tsnep_get_system_time(adapter, &system_time); + timeout = system_time + TSNEP_GC_TIMEOUT; + + if (curr) + limit = timeout + curr->change_limit; + else + limit = timeout; + + gcl->start_time = tsnep_gcl_start_after(gcl, limit); + + /* gate control time register is only 32bit => time shall be in the near + * future (no driver support for far future implemented) + */ + if ((gcl->start_time - system_time) >= U32_MAX) + return -EAGAIN; + + if (curr) { + /* change gate control list */ + u64 last; + u64 change; + + last = tsnep_gcl_start_before(curr, gcl->start_time); + if ((last + curr->cycle_time) == gcl->start_time) + change = tsnep_cut_gcl(curr, last, + gcl->start_time - last); + else if (((gcl->start_time - last) <= + curr->cycle_time_extension) || + ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL)) + change = tsnep_extend_gcl(curr, last, + gcl->start_time - last); + else + change = tsnep_cut_gcl(curr, last, + gcl->start_time - last); + + WARN_ON(change <= timeout); + gcl->change = true; + iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE); + } else { + /* start gate control list */ + WARN_ON(gcl->start_time <= timeout); + gcl->change = false; + iowrite32(gcl->start_time & 0xFFFFFFFF, + adapter->addr + TSNEP_GC_TIME); + } + + return 0; +} + +static int tsnep_taprio(struct tsnep_adapter *adapter, + struct tc_taprio_qopt_offload *qopt) +{ + struct tsnep_gcl *gcl; + struct tsnep_gcl *curr; + int retval; + + if (!adapter->gate_control) + return -EOPNOTSUPP; + + if (!qopt->enable) { + /* disable gate control if active */ + mutex_lock(&adapter->gate_control_lock); + + if (adapter->gate_control_active) { + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); + adapter->gate_control_active = false; + } + +
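+ /* the disable path touches the hardware only when a schedule is active and runs under gate_control_lock, which serializes it against a concurrent enable in the retry loop below */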
mutex_unlock(&adapter->gate_control_lock); + + return 0; + } + + retval = tsnep_validate_gcl(qopt); + if (retval) + return retval; + + mutex_lock(&adapter->gate_control_lock); + + gcl = &adapter->gcl[adapter->next_gcl]; + tsnep_write_gcl(gcl, qopt); + + /* select current gate control list if active */ + if (adapter->gate_control_active) { + if (adapter->next_gcl == 0) + curr = &adapter->gcl[1]; + else + curr = &adapter->gcl[0]; + } else { + curr = NULL; + } + + for (;;) { + /* start timeout which discards a late enable; this helps ensure + * that start/change time are in the future at enable + */ + iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC); + + retval = tsnep_enable_gcl(adapter, gcl, curr); + if (retval) { + mutex_unlock(&adapter->gate_control_lock); + + return retval; + } + + /* enable gate control list */ + if (adapter->next_gcl == 0) + iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC); + else + iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC); + + /* done if timeout did not happen */ + if (!(ioread32(adapter->addr + TSNEP_GC) & + TSNEP_GC_TIMEOUT_SIGNAL)) + break; + + /* timeout is acknowledged with any enable */ + iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC); + + if (curr) + tsnep_clean_gcl(curr); + + /* retry because of timeout */ + } + + adapter->gate_control_active = true; + + if (adapter->next_gcl == 0) + adapter->next_gcl = 1; + else + adapter->next_gcl = 0; + + mutex_unlock(&adapter->gate_control_lock); + + return 0; +} + +int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return tsnep_taprio(adapter, type_data); + default: + return -EOPNOTSUPP; + } +} + +int tsnep_tc_init(struct tsnep_adapter *adapter) +{ + if (!adapter->gate_control) + return 0; + + /* open all gates */ + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); + iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC); + + adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A; + adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B; + + return 0; +} + +void tsnep_tc_cleanup(struct tsnep_adapter *adapter) +{ + if (!adapter->gate_control) + return; + + if (adapter->gate_control_active) { + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); + adapter->gate_control_active = false; + } +} diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index b1c8ffea6ad2..437c5acfe222 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -945,7 +945,9 @@ static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs, } static void ethoc_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ethoc *priv = netdev_priv(dev); @@ -961,7 +963,9 @@ static void ethoc_get_ringparam(struct net_device *dev, } static int ethoc_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ethoc *priv = netdev_priv(dev); @@ -1074,14 +1078,11 @@ static int ethoc_probe(struct platform_device *pdev) /* obtain device IRQ number */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(&pdev->dev, "cannot obtain IRQ\n"); - ret = -ENXIO; + ret = platform_get_irq(pdev, 0); + if (ret < 0) goto free; - }
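+ /* platform_get_irq() returns the IRQ number or a negative errno and already logs an error itself, making the removed IORESOURCE_IRQ lookup and dev_err() redundant */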
- netdev->irq = res->start; + netdev->irq = ret; /* setup driver-private data */ priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 97c5d70de76e..691605c15265 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1178,8 +1178,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } -static void ftgmac100_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ering) +static void +ftgmac100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct ftgmac100 *priv = netdev_priv(netdev); @@ -1190,8 +1193,11 @@ static void ftgmac100_get_ringparam(struct net_device *netdev, ering->tx_pending = priv->tx_q_entries; } -static int ftgmac100_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ering) +static int +ftgmac100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct ftgmac100 *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 6b2927d863e2..c78883c3a2c8 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2325,7 +2325,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) txq = netdev_get_tx_queue(net_dev, queue_mapping); /* LLTX requires to do our own update of trans_start */ - txq->trans_start = jiffies; + txq_trans_cond_update(txq); if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); @@ -2531,7 +2531,7 @@ static int dpaa_xdp_xmit_frame(struct net_device *net_dev, /* Bump the trans_start */ txq = netdev_get_tx_queue(net_dev, smp_processor_id()); - txq->trans_start = jiffies; + txq_trans_cond_update(txq); err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd); if (err) { @@ -2623,7 +2623,7 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr, } break; default: - bpf_warn_invalid_xdp_action(xdp_act); + bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 714e961e7a77..d21ba70ef4a3 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -374,7 +374,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv, dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); break; default: - bpf_warn_invalid_xdp_action(xdp_act); + bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); @@ -4550,10 +4550,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) fsl_mc_portal_free(priv->mc_io); - free_netdev(net_dev); + destroy_workqueue(priv->dpaa2_ptp_wq); dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + free_netdev(net_dev); + return 0; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 2085844227fe..e54e70ebdd05 
100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -388,6 +388,8 @@ struct dpaa2_eth_ch_stats { __u64 bytes_per_cdan; }; +#define DPAA2_ETH_CH_STATS 7 + /* Maximum number of queues associated with a DPNI */ #define DPAA2_ETH_MAX_TCS 8 #define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16 diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index adb8ce5306ee..3fdbf87dccb1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -278,7 +278,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, /* Per-channel stats */ for (k = 0; k < priv->num_channels; k++) { ch_stats = &priv->channel[k]->stats; - for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++) + for (j = 0; j < DPAA2_ETH_CH_STATS; j++) *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j); } i += j; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index ef8f0a055024..623d113b6581 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -2,6 +2,7 @@ /* Copyright 2019 NXP */ #include <linux/acpi.h> +#include <linux/pcs-lynx.h> #include <linux/property.h> #include "dpaa2-eth.h" @@ -40,7 +41,7 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode) static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev, u16 dpmac_id) { - struct fwnode_handle *fwnode, *parent, *child = NULL; + struct fwnode_handle *fwnode, *parent = NULL, *child = NULL; struct device_node *dpmacs = NULL; int err; u32 id; @@ -53,8 +54,17 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev, parent = of_fwnode_handle(dpmacs); } else if (is_acpi_node(fwnode)) { parent = fwnode; + } else { + /* The root dprc device didn't yet get to finalize its probe, + * thus the fwnode field is not yet set. Defer probe if we are + * facing this situation.
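+ * (-EPROBE_DEFER makes the driver core retry this probe later, once the root dprc has finished probing and populated the fwnode)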
+ */ + return ERR_PTR(-EPROBE_DEFER); } + if (!parent) + return NULL; + fwnode_for_each_child_node(parent, child) { err = -EINVAL; if (is_acpi_device_node(child)) @@ -90,88 +100,6 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node, return err; } -static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac, - phy_interface_t interface) -{ - switch (interface) { - /* We can switch between SGMII and 1000BASE-X at runtime with - * pcs-lynx - */ - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - if (mac->pcs && - (mac->if_mode == PHY_INTERFACE_MODE_SGMII || - mac->if_mode == PHY_INTERFACE_MODE_1000BASEX)) - return false; - return interface != mac->if_mode; - - case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_USXGMII: - case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - return (interface != mac->if_mode); - default: - return true; - } -} - -static void dpaa2_mac_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - if (state->interface != PHY_INTERFACE_MODE_NA && - dpaa2_mac_phy_mode_mismatch(mac, state->interface)) { - goto empty_set; - } - - phylink_set_port_modes(mask); - phylink_set(mask, Autoneg); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - - switch (state->interface) { - case PHY_INTERFACE_MODE_NA: - case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_USXGMII: - phylink_set_10g_modes(mask); - if (state->interface == PHY_INTERFACE_MODE_10GBASER) - break; - phylink_set(mask, 5000baseT_Full); - phylink_set(mask, 2500baseT_Full); - fallthrough; - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - phylink_set(mask, 1000baseX_Full); - phylink_set(mask, 1000baseT_Full); - if (state->interface == PHY_INTERFACE_MODE_1000BASEX) - break; - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 10baseT_Full); - break; - default: - goto empty_set; - } - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); - - return; - -empty_set: - linkmode_zero(supported); -} - static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -243,7 +171,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config, } static const struct phylink_mac_ops dpaa2_mac_phylink_ops = { - .validate = dpaa2_mac_validate, + .validate = phylink_generic_validate, .mac_config = dpaa2_mac_config, .mac_link_up = dpaa2_mac_link_up, .mac_link_down = dpaa2_mac_link_down, @@ -286,11 +214,13 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac, static void dpaa2_pcs_destroy(struct dpaa2_mac *mac) { - struct lynx_pcs *pcs = mac->pcs; + struct phylink_pcs *phylink_pcs = mac->pcs; - if (pcs) { - struct device *dev = &pcs->mdio->dev; - lynx_pcs_destroy(pcs); + if (phylink_pcs) { + struct mdio_device *mdio = lynx_get_mdio_device(phylink_pcs); + struct device *dev = &mdio->dev; + + lynx_pcs_destroy(phylink_pcs); put_device(dev); mac->pcs = NULL; } @@ -336,9 +266,34 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac) return err; } + memset(&mac->phylink_config, 0, 
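+ /* presumably zeroed because dpaa2_mac_connect() can run again after a disconnect, so stale capability or supported-interface bits from a previous connect must not leak into this one */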
sizeof(mac->phylink_config)); mac->phylink_config.dev = &net_dev->dev; mac->phylink_config.type = PHYLINK_NETDEV; + mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | + MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD | + MAC_10000FD; + + /* We support the current interface mode and, if we have a PCS, + * similar interface modes that do not require the PLLs to be + * reconfigured. + */ + __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces); + if (mac->pcs) { + switch (mac->if_mode) { + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_SGMII: + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + mac->phylink_config.supported_interfaces); + break; + + default: + break; + } + } + phylink = phylink_create(&mac->phylink_config, dpmac_node, mac->if_mode, &dpaa2_mac_phylink_ops); @@ -349,7 +304,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac) mac->phylink = phylink; if (mac->pcs) - phylink_set_pcs(mac->phylink, &mac->pcs->pcs); + phylink_set_pcs(mac->phylink, mac->pcs); err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0); if (err) { @@ -381,6 +336,7 @@ int dpaa2_mac_open(struct dpaa2_mac *mac) { struct fsl_mc_device *dpmac_dev = mac->mc_dev; struct net_device *net_dev = mac->net_dev; + struct fwnode_handle *fw_node; int err; err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id, @@ -400,7 +356,13 @@ int dpaa2_mac_open(struct dpaa2_mac *mac) /* Find the device node representing the MAC device and link the device * behind the associated netdev to it. */ - mac->fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id); + fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id); + if (IS_ERR(fw_node)) { + err = PTR_ERR(fw_node); + goto err_close_dpmac; + } + + mac->fw_node = fw_node; net_dev->dev.of_node = to_of_node(mac->fw_node); return 0; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h index 7842cbb2207a..1331a8477fe4 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h @@ -7,7 +7,6 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phylink.h> -#include <linux/pcs-lynx.h> #include "dpmac.h" #include "dpmac-cmd.h" @@ -23,7 +22,7 @@ struct dpaa2_mac { struct phylink *phylink; phy_interface_t if_mode; enum dpmac_link_type if_link_type; - struct lynx_pcs *pcs; + struct phylink_pcs *pcs; struct fwnode_handle *fw_node; }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index d039457928b0..9b5512b4f15d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -394,7 +394,8 @@ static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid) for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { ppriv_local = ethsw->ports[i]; - ppriv_local->vlans[vid] = 0; + if (ppriv_local) + ppriv_local->vlans[vid] = 0; } return 0; @@ -1896,9 +1897,11 @@ static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid /* Delete VLAN from switch if it is no longer configured on * any port */ - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { + if (ethsw->ports[i] && + ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER) + return 0; /* Found a port member in VID */ + } ethsw->vlans[vid] &=
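+ /* the NULL checks added in this hunk assume ports[] can contain entries that were never set up, consistent with the similar guard added in dpaa2_switch_dellink() above */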
~ETHSW_VLAN_GLOBAL; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 504e12554079..d6930a797c6c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1547,7 +1547,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, switch (xdp_act) { default: - bpf_warn_invalid_xdp_action(xdp_act); + bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act); fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->ndev, prog, xdp_act); @@ -2897,12 +2897,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "DMA configuration failed: 0x%x\n", err); - goto err_dma; - } + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; } err = pci_request_mem_regions(pdev, name); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 910b9f722504..fa5b4f885b17 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -562,7 +562,9 @@ static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir, } static void enetc_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 0e87c7043b77..ed16a5ac9ad0 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -8,6 +8,7 @@ #include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/of_net.h> +#include <linux/pcs-lynx.h> #include "enetc_ierb.h" #include "enetc_pf.h" @@ -828,8 +829,8 @@ static int enetc_imdio_create(struct enetc_pf *pf) { struct device *dev = &pf->si->pdev->dev; struct enetc_mdio_priv *mdio_priv; - struct lynx_pcs *pcs_lynx; - struct mdio_device *pcs; + struct phylink_pcs *phylink_pcs; + struct mdio_device *mdio_device; struct mii_bus *bus; int err; @@ -853,23 +854,23 @@ static int enetc_imdio_create(struct enetc_pf *pf) goto free_mdio_bus; } - pcs = mdio_device_create(bus, 0); - if (IS_ERR(pcs)) { - err = PTR_ERR(pcs); - dev_err(dev, "cannot create pcs (%d)\n", err); + mdio_device = mdio_device_create(bus, 0); + if (IS_ERR(mdio_device)) { + err = PTR_ERR(mdio_device); + dev_err(dev, "cannot create mdio device (%d)\n", err); goto unregister_mdiobus; } - pcs_lynx = lynx_pcs_create(pcs); - if (!pcs_lynx) { - mdio_device_free(pcs); + phylink_pcs = lynx_pcs_create(mdio_device); + if (!phylink_pcs) { + mdio_device_free(mdio_device); err = -ENOMEM; dev_err(dev, "cannot create lynx pcs (%d)\n", err); goto unregister_mdiobus; } pf->imdio = bus; - pf->pcs = pcs_lynx; + pf->pcs = phylink_pcs; return 0; @@ -882,8 +883,11 @@ free_mdio_bus: static void enetc_imdio_remove(struct enetc_pf *pf) { + struct mdio_device *mdio_device; + if (pf->pcs) { - mdio_device_free(pf->pcs->mdio); + mdio_device = lynx_get_mdio_device(pf->pcs); + mdio_device_free(mdio_device); lynx_pcs_destroy(pf->pcs); } if (pf->imdio) { @@ -930,45 +934,6 @@ static void enetc_mdiobus_destroy(struct 
enetc_pf *pf) enetc_imdio_remove(pf); } -static void enetc_pl_mac_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_INTERNAL && - state->interface != PHY_INTERFACE_MODE_SGMII && - state->interface != PHY_INTERFACE_MODE_2500BASEX && - state->interface != PHY_INTERFACE_MODE_USXGMII && - !phy_interface_mode_is_rgmii(state->interface)) { - linkmode_zero(supported); - return; - } - - phylink_set_port_modes(mask); - phylink_set(mask, Autoneg); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 1000baseT_Half); - phylink_set(mask, 1000baseT_Full); - - if (state->interface == PHY_INTERFACE_MODE_INTERNAL || - state->interface == PHY_INTERFACE_MODE_2500BASEX || - state->interface == PHY_INTERFACE_MODE_USXGMII) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -} - static void enetc_pl_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) @@ -980,7 +945,7 @@ static void enetc_pl_mac_config(struct phylink_config *config, priv = netdev_priv(pf->si->ndev); if (pf->pcs) - phylink_set_pcs(priv->phylink, &pf->pcs->pcs); + phylink_set_pcs(priv->phylink, pf->pcs); } static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex) @@ -1096,7 +1061,7 @@ static void enetc_pl_mac_link_down(struct phylink_config *config, } static const struct phylink_mac_ops enetc_mac_phylink_ops = { - .validate = enetc_pl_mac_validate, + .validate = phylink_generic_validate, .mac_config = enetc_pl_mac_config, .mac_link_up = enetc_pl_mac_link_up, .mac_link_down = enetc_pl_mac_link_down, @@ -1111,6 +1076,18 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv, pf->phylink_config.dev = &priv->ndev->dev; pf->phylink_config.type = PHYLINK_NETDEV; + pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; + + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + pf->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_USXGMII, + pf->phylink_config.supported_interfaces); + phy_interface_set_rgmii(pf->phylink_config.supported_interfaces); phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node), pf->if_mode, &enetc_mac_phylink_ops); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h index 263946c51e37..c26bd66e4597 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -2,7 +2,7 @@ /* Copyright 2017-2019 NXP */ #include "enetc.h" -#include <linux/pcs-lynx.h> +#include <linux/phylink.h> #define ENETC_PF_NUM_RINGS 8 @@ -46,7 +46,7 @@ struct enetc_pf { struct mii_bus *mdio; /* saved for cleanup */ struct mii_bus *imdio; - struct lynx_pcs *pcs; + struct phylink_pcs *pcs; phy_interface_t if_mode; struct phylink_config phylink_config; diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index 36b4f51dd297..17c097cef7d4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -42,15 +42,10 @@ static int enetc_ptp_probe(struct pci_dev *pdev, if (err) return dev_err_probe(&pdev->dev, err, "device enable failed\n"); - /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "DMA configuration failed: 0x%x\n", err); - goto err_dma; - } + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; } err = pci_request_mem_regions(pdev, KBUILD_MODNAME); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 0536d2c76fbc..3555c12edb45 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1182,7 +1182,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } /* parsing gate action */ - if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) { + if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) { NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); err = -ENOSPC; goto free_filter; @@ -1202,7 +1202,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } refcount_set(&sgi->refcount, 1); - sgi->index = entryg->gate.index; + sgi->index = entryg->hw_index; sgi->init_ipv = entryg->gate.prio; sgi->basetime = entryg->gate.basetime; sgi->cycletime = entryg->gate.cycletime; @@ -1244,7 +1244,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, refcount_set(&fmi->refcount, 1); fmi->cir = entryp->police.rate_bytes_ps; fmi->cbs = entryp->police.burst; - fmi->index = entryp->police.index; + fmi->index = entryp->hw_index; filter->flags |= ENETC_PSFP_FLAGS_FMI; filter->fmi_index = fmi->index; sfi->meter_id = fmi->index; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 7b4961daa254..ed7301b69169 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -377,6 +377,9 @@ struct bufdesc_ex { #define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \ + (((X) == 1) ? 
FEC_ENET_RXF_1 : \ + FEC_ENET_RXF_2)) #define FEC_ENET_TS_AVAIL ((uint)0x00010000) #define FEC_ENET_TS_TIMER ((uint)0x00008000) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index bc418b910999..796133de527e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1185,6 +1185,21 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) } } +static void fec_irqs_disable(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + writel(0, fep->hwp + FEC_IMASK); +} + +static void fec_irqs_disable_except_wakeup(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + writel(0, fep->hwp + FEC_IMASK); + writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); +} + static void fec_stop(struct net_device *ndev) { @@ -1211,15 +1226,13 @@ fec_stop(struct net_device *ndev) writel(1, fep->hwp + FEC_ECNTRL); udelay(10); } - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } else { - writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); val = readl(fep->hwp + FEC_ECNTRL); val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); writel(val, fep->hwp + FEC_ECNTRL); - fec_enet_stop_mode(fep, true); } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC && @@ -1480,7 +1493,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) break; pkt_received++; - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); + writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); /* Check for errors. */ status ^= BD_ENET_RX_LAST; @@ -2877,15 +2890,10 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) return -EINVAL; device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); - if (device_may_wakeup(&ndev->dev)) { + if (device_may_wakeup(&ndev->dev)) fep->wol_flag |= FEC_WOL_FLAG_ENABLE; - if (fep->wake_irq > 0) - enable_irq_wake(fep->wake_irq); - } else { + else fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); - if (fep->wake_irq > 0) - disable_irq_wake(fep->wake_irq); - } return 0; } @@ -3558,7 +3566,7 @@ static int fec_enet_init(struct net_device *ndev) ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; if (fep->quirks & FEC_QUIRK_HAS_CSUM) { - ndev->gso_max_segs = FEC_MAX_TSO_SEGS; + netif_set_gso_max_segs(ndev, FEC_MAX_TSO_SEGS); /* enable hw accelerator */ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM @@ -4057,9 +4065,19 @@ static int __maybe_unused fec_suspend(struct device *dev) netif_device_detach(ndev); netif_tx_unlock_bh(ndev); fec_stop(ndev); - fec_enet_clk_enable(ndev, false); - if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) + if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { + fec_irqs_disable(ndev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); + } else { + fec_irqs_disable_except_wakeup(ndev); + if (fep->wake_irq > 0) { + disable_irq(fep->wake_irq); + enable_irq_wake(fep->wake_irq); + } + fec_enet_stop_mode(fep, true); + } + /* It's safe to disable clocks since interrupts are masked */ + fec_enet_clk_enable(ndev, false); } rtnl_unlock(); @@ -4097,6 +4115,10 @@ static int __maybe_unused fec_resume(struct device *dev) } if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { fec_enet_stop_mode(fep, false); + if (fep->wake_irq) { + disable_irq_wake(fep->wake_irq); + enable_irq(fep->wake_irq); + } val = readl(fep->hwp + FEC_ECNTRL); val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); diff --git 
a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index d71eac7e1924..af99017a5453 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -473,10 +473,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: fep->hwts_tx_en = 0; diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index ce0a121580f6..8f0db61cb1f6 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2727,7 +2727,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) fman = kzalloc(sizeof(*fman), GFP_KERNEL); if (!fman) - return NULL; + return ERR_PTR(-ENOMEM); fm_node = of_node_get(of_dev->dev.of_node); @@ -2740,26 +2740,21 @@ static struct fman *read_dts_node(struct platform_device *of_dev) fman->dts_params.id = (u8)val; /* Get the FM interrupt */ - res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n", - __func__); + err = platform_get_irq(of_dev, 0); + if (err < 0) goto fman_node_put; - } - irq = res->start; + irq = err; /* Get the FM error interrupt */ - res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1); - if (!res) { - dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n", - __func__); + err = platform_get_irq(of_dev, 1); + if (err < 0) goto fman_node_put; - } - fman->dts_params.err_irq = res->start; + fman->dts_params.err_irq = err; /* Get the FM address */ res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); if (!res) { + err = -EINVAL; dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n", __func__); goto fman_node_put; @@ -2770,6 +2765,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) clk = of_clk_get(fm_node, 0); if (IS_ERR(clk)) { + err = PTR_ERR(clk); dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n", __func__, fman->dts_params.id); goto fman_node_put; @@ -2777,6 +2773,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) clk_rate = clk_get_rate(clk); if (!clk_rate) { + err = -EINVAL; dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n", __func__, fman->dts_params.id); goto fman_node_put; @@ -2797,6 +2794,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) /* Get the MURAM base address and size */ muram_node = of_find_matching_node(fm_node, fman_muram_match); if (!muram_node) { + err = -EINVAL; dev_err(&of_dev->dev, "%s: could not find MURAM node\n", __func__); goto fman_free; @@ -2836,6 +2834,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) devm_request_mem_region(&of_dev->dev, phys_base_addr, mem_size, "fman"); if (!fman->dts_params.res) { + err = -EBUSY; dev_err(&of_dev->dev, "%s: request_mem_region() failed\n", __func__); goto fman_free; @@ -2844,6 +2843,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) fman->dts_params.base_addr = devm_ioremap(&of_dev->dev, phys_base_addr, mem_size); if (!fman->dts_params.base_addr) { + err = -ENOMEM; dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__); goto fman_free; } @@ -2868,7 +2868,7 @@ fman_node_put: of_node_put(fm_node); fman_free: kfree(fman); - return NULL; + return ERR_PTR(err); } static int fman_probe(struct 
platform_device *of_dev) @@ -2880,8 +2880,8 @@ static int fman_probe(struct platform_device *of_dev) dev = &of_dev->dev; fman = read_dts_node(of_dev); - if (!fman) - return -EIO; + if (IS_ERR(fman)) + return PTR_ERR(fman); err = fman_config(fman); if (err) { diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index d9baac0dbc7d..4c9d05c45c03 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev) fman = dev_get_drvdata(&fm_pdev->dev); if (!fman) { err = -EINVAL; - goto return_err; + goto put_device; } err = of_property_read_u32(port_node, "cell-index", &val); @@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev) dev_err(port->dev, "%s: reading cell-index for %pOF failed\n", __func__, port_node); err = -EINVAL; - goto return_err; + goto put_device; } port_id = (u8)val; port->dts_params.id = port_id; @@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev) } else { dev_err(port->dev, "%s: Illegal port type\n", __func__); err = -EINVAL; - goto return_err; + goto put_device; } port->dts_params.type = port_type; @@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev) dev_err(port->dev, "%s: incorrect qman-channel-id\n", __func__); err = -EINVAL; - goto return_err; + goto put_device; } port->dts_params.qman_channel_id = qman_channel_id; } @@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev) dev_err(port->dev, "%s: of_address_to_resource() failed\n", __func__); err = -ENOMEM; - goto return_err; + goto put_device; } port->dts_params.fman = fman; @@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev) return 0; +put_device: + put_device(&fm_pdev->dev); return_err: of_node_put(port_node); free_port: diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index d9fc5c456bf3..39ae965cd4f6 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -94,14 +94,17 @@ static void mac_exception(void *handle, enum fman_mac_exceptions ex) __func__, ex); } -static void set_fman_mac_params(struct mac_device *mac_dev, - struct fman_mac_params *params) +static int set_fman_mac_params(struct mac_device *mac_dev, + struct fman_mac_params *params) { struct mac_priv_s *priv = mac_dev->priv; params->base_addr = (typeof(params->base_addr)) devm_ioremap(priv->dev, mac_dev->res->start, resource_size(mac_dev->res)); + if (!params->base_addr) + return -ENOMEM; + memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr)); params->max_speed = priv->max_speed; params->phy_if = mac_dev->phy_if; @@ -112,6 +115,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev, params->event_cb = mac_exception; params->dev_id = mac_dev; params->internal_phy_node = priv->internal_phy_node; + + return 0; } static int tgec_initialization(struct mac_device *mac_dev) @@ -123,7 +128,9 @@ static int tgec_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, &params); + err = set_fman_mac_params(mac_dev, &params); + if (err) + goto _return; mac_dev->fman_mac = tgec_config(&params); if (!mac_dev->fman_mac) { @@ -169,7 +176,9 @@ static int dtsec_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, &params); + err = set_fman_mac_params(mac_dev, &params); + if (err) + goto _return; mac_dev->fman_mac = dtsec_config(&params); if (!mac_dev->fman_mac) { @@ -218,7 +227,9 @@ static int memac_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, &params); + err = set_fman_mac_params(mac_dev, &params); + if (err) + goto _return; if (priv->max_speed == SPEED_10000) params.phy_if = PHY_INTERFACE_MODE_XGMII;
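The fman_port.c hunk above fixes a device reference leak: of_find_device_by_node() takes a reference on the FMan platform device, and the old error paths jumped to return_err without ever dropping it. The new put_device: label sits above return_err: so the unwind still runs in reverse order of acquisition. Below is a minimal, self-contained userspace sketch of that pattern; the malloc()/free() pairs merely stand in for the get/put helpers named in the comments, and late_setup() is a hypothetical step that fails after both references are held.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int late_setup(void)
{
        return -EINVAL;                 /* simulate a failure after both "gets" */
}

static int probe_demo(void)
{
        int err;
        char *port = malloc(16);        /* stands in for kzalloc(port) */
        char *node_ref, *dev_ref;

        if (!port)
                return -ENOMEM;
        node_ref = malloc(16);          /* stands in for the of_parse_phandle() ref */
        if (!node_ref) {
                err = -ENOMEM;
                goto free_port;
        }
        dev_ref = malloc(16);           /* stands in for the of_find_device_by_node() ref */
        if (!dev_ref) {
                err = -ENOMEM;
                goto return_err;
        }
        err = late_setup();
        if (err)
                goto put_device;        /* late failure: drop the device ref first */
        return 0;                       /* success: the driver keeps everything */

put_device:
        free(dev_ref);                  /* put_device(&fm_pdev->dev) */
return_err:
        free(node_ref);                 /* of_node_put(port_node) */
free_port:
        free(port);                     /* kfree(port) */
        return err;
}

int main(void)
{
        printf("probe_demo() = %d\n", probe_demo());    /* prints -22 on Linux */
        return 0;
}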
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index acab58fd3db3..206b7a35eaf5 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2076,10 +2076,6 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: priv->hwts_tx_en = 0; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 7b32ed29bf4c..ff756265d58f 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -372,7 +372,9 @@ static int gfar_scoalesce(struct net_device *dev, * rx, rx_mini, and rx_jumbo rings are the same size, as mini and * jumbo are ignored by the driver */ static void gfar_gringparam(struct net_device *dev, - struct ethtool_ringparam *rvals) + struct ethtool_ringparam *rvals, + struct kernel_ethtool_ringparam *kernel_rvals, + struct netlink_ext_ack *extack) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_tx_q *tx_queue = NULL; @@ -399,7 +401,9 @@ static void gfar_gringparam(struct net_device *dev, * necessary so that we don't mess things up while we're in motion.
*/ static int gfar_sringparam(struct net_device *dev, - struct ethtool_ringparam *rvals) + struct ethtool_ringparam *rvals, + struct kernel_ethtool_ringparam *kernel_rvals, + struct netlink_ext_ack *extack) { struct gfar_private *priv = netdev_priv(dev); int err = 0, i; diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 14c08a868190..69b2b98b1525 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -207,7 +207,9 @@ uec_get_regs(struct net_device *netdev, static void uec_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct ucc_geth_info *ug_info = ugeth->ug_info; @@ -226,7 +228,9 @@ uec_get_ringparam(struct net_device *netdev, static int uec_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct ucc_geth_info *ug_info = ugeth->ug_info; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 0b68852379da..5b8b9bcf41a2 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -47,7 +47,6 @@ struct tgec_mdio_controller { #define MDIO_CTL_READ BIT(15) #define MDIO_DATA(x) (x & 0xffff) -#define MDIO_DATA_BSY BIT(31) struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index b719f72281c4..5f5d4f7aa813 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -229,6 +229,7 @@ struct gve_rx_ring { /* A TX desc ring entry */ union gve_tx_desc { struct gve_tx_pkt_desc pkt; /* first desc for a packet */ + struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */ struct gve_tx_seg_desc seg; /* subsequent descs for a packet */ }; @@ -441,13 +442,13 @@ struct gve_tx_ring { * associated with that irq. 
*/ struct gve_notify_block { - __be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */ + __be32 *irq_db_index; /* pointer to idx into Bar2 */ char name[IFNAMSIZ + 16]; /* name registered with the kernel */ struct napi_struct napi; /* kernel napi struct for this block */ struct gve_priv *priv; struct gve_tx_ring *tx; /* tx rings on this block */ struct gve_rx_ring *rx; /* rx rings on this block */ -} ____cacheline_aligned; +}; /* Tracks allowed and current queue settings */ struct gve_queue_config { @@ -466,6 +467,10 @@ struct gve_options_dqo_rda { u16 rx_buff_ring_entries; /* number of rx_buff descriptors */ }; +struct gve_irq_db { + __be32 index; +} ____cacheline_aligned; + struct gve_ptype { u8 l3_type; /* `gve_l3_type` in gve_adminq.h */ u8 l4_type; /* `gve_l4_type` in gve_adminq.h */ @@ -492,7 +497,8 @@ struct gve_priv { struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */ struct gve_queue_page_list *qpls; /* array of num qpls */ struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */ - dma_addr_t ntfy_block_bus; + struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */ + dma_addr_t irq_db_indices_bus; struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */ char mgmt_msix_name[IFNAMSIZ + 16]; u32 mgmt_msix_idx; @@ -551,6 +557,8 @@ struct gve_priv { u32 page_alloc_fail; /* count of page alloc fails */ u32 dma_mapping_error; /* count of dma mapping errors */ u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */ + u32 suspend_cnt; /* count of times suspended */ + u32 resume_cnt; /* count of times resumed */ struct workqueue_struct *gve_wq; struct work_struct service_task; struct work_struct stats_report_task; @@ -567,6 +575,7 @@ struct gve_priv { /* Gvnic device link speed from hypervisor. */ u64 link_speed; + bool up_before_suspend; /* True if dev was up before suspend */ struct gve_options_dqo_rda options_dqo_rda; struct gve_ptype_lut *ptype_lut_dqo; @@ -575,6 +584,10 @@ struct gve_priv { int data_buffer_size_dqo; enum gve_queue_format queue_format; + + /* Interrupt coalescing settings */ + u32 tx_coalesce_usecs; + u32 rx_coalesce_usecs; }; enum gve_service_task_flags_bit { @@ -733,7 +746,7 @@ static inline void gve_clear_report_stats(struct gve_priv *priv) static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv, struct gve_notify_block *block) { - return &priv->db_bar2[be32_to_cpu(block->irq_db_index)]; + return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)]; } /* Returns the index into ntfy_blocks of the given tx ring's block diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 83ae56c310d3..2ad7f57f7e5b 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -462,7 +462,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv, .num_counters = cpu_to_be32(num_counters), .irq_db_addr = cpu_to_be64(db_array_bus_addr), .num_irq_dbs = cpu_to_be32(num_ntfy_blks), - .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])), + .irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)), .ntfy_blk_msix_base_idx = cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX), .queue_format = priv->queue_format, @@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) * is not set to GqiRda, choose the queue format in a priority order: * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default. 
*/ - if (priv->queue_format == GVE_GQI_RDA_FORMAT) { - dev_info(&priv->pdev->dev, - "Driver is running with GQI RDA queue format.\n"); - } else if (dev_op_dqo_rda) { + if (dev_op_dqo_rda) { priv->queue_format = GVE_DQO_RDA_FORMAT; dev_info(&priv->pdev->dev, "Driver is running with DQO RDA queue format.\n"); @@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv) "Driver is running with GQI RDA queue format.\n"); supported_features_mask = be32_to_cpu(dev_op_gqi_rda->supported_features_mask); + } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) { + dev_info(&priv->pdev->dev, + "Driver is running with GQI RDA queue format.\n"); } else { priv->queue_format = GVE_GQI_QPL_FORMAT; if (dev_op_gqi_qpl) diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h index 4d225a18d8ce..f4ae9e19b844 100644 --- a/drivers/net/ethernet/google/gve/gve_desc.h +++ b/drivers/net/ethernet/google/gve/gve_desc.h @@ -33,6 +33,14 @@ struct gve_tx_pkt_desc { __be64 seg_addr; /* Base address (see note) of this segment */ } __packed; +struct gve_tx_mtd_desc { + u8 type_flags; /* type is lower 4 bits, subtype upper */ + u8 path_state; /* state is lower 4 bits, hash type upper */ + __be16 reserved0; + __be32 path_hash; + __be64 reserved1; +} __packed; + struct gve_tx_seg_desc { u8 type_flags; /* type is lower 4 bits, flags upper */ u8 l3_offset; /* TSO: 2 byte units to start of IPH */ @@ -46,6 +54,7 @@ struct gve_tx_seg_desc { #define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */ #define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */ #define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */ +#define GVE_TXD_MTD (0x3 << 4) /* Metadata */ /* GVE Transmit Descriptor Flags for Std Pkts */ #define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */ @@ -54,6 +63,17 @@ struct gve_tx_seg_desc { /* GVE Transmit Descriptor Flags for TSO Segs */ #define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */ +/* GVE Transmit Descriptor Options for MTD Segs */ +#define GVE_MTD_SUBTYPE_PATH 0 + +#define GVE_MTD_PATH_STATE_DEFAULT 0 +#define GVE_MTD_PATH_STATE_TIMEOUT 1 +#define GVE_MTD_PATH_STATE_CONGESTION 2 +#define GVE_MTD_PATH_STATE_RETRANSMIT 3 + +#define GVE_MTD_PATH_HASH_NONE (0x0 << 4) +#define GVE_MTD_PATH_HASH_L4 (0x1 << 4) + /* GVE Receive Packet Descriptor */ /* The start of an ethernet packet comes 2 bytes into the rx buffer. * gVNIC adds this padding so that both the DMA and the L3/4 protocol header diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h index 836042364124..1eb4d5fd8561 100644 --- a/drivers/net/ethernet/google/gve/gve_dqo.h +++ b/drivers/net/ethernet/google/gve/gve_dqo.h @@ -18,6 +18,7 @@ #define GVE_TX_IRQ_RATELIMIT_US_DQO 50 #define GVE_RX_IRQ_RATELIMIT_US_DQO 20 +#define GVE_MAX_ITR_INTERVAL_DQO (GVE_ITR_INTERVAL_DQO_MASK * 2) /* Timeout in seconds to wait for a reinjection completion after receiving * its corresponding miss completion. @@ -54,17 +55,17 @@ gve_tx_put_doorbell_dqo(const struct gve_priv *priv, } /* Builds register value to write to DQO IRQ doorbell to enable with specified - * ratelimit. + * ITR interval. */ -static inline u32 gve_set_itr_ratelimit_dqo(u32 ratelimit_us) +static inline u32 gve_setup_itr_interval_dqo(u32 interval_us) { u32 result = GVE_ITR_ENABLE_BIT_DQO; /* Interval has 2us granularity. 
*/ - ratelimit_us >>= 1; + interval_us >>= 1; - ratelimit_us &= GVE_ITR_INTERVAL_DQO_MASK; - result |= (ratelimit_us << GVE_ITR_INTERVAL_DQO_SHIFT); + interval_us &= GVE_ITR_INTERVAL_DQO_MASK; + result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT); return result; } @@ -73,9 +74,20 @@ static inline void gve_write_irq_doorbell_dqo(const struct gve_priv *priv, const struct gve_notify_block *block, u32 val) { - u32 index = be32_to_cpu(block->irq_db_index); + u32 index = be32_to_cpu(*block->irq_db_index); iowrite32(val, &priv->db_bar2[index]); } +/* Sets interrupt throttling interval and enables interrupt + * by writing to IRQ doorbell. + */ +static inline void +gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv, + struct gve_notify_block *block, + u32 usecs) +{ + gve_write_irq_doorbell_dqo(priv, block, + gve_setup_itr_interval_dqo(usecs)); +} #endif /* _GVE_DQO_H_ */ diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index c8df47a97fa4..50b384910c83 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -8,6 +8,7 @@ #include <linux/rtnetlink.h> #include "gve.h" #include "gve_adminq.h" +#include "gve_dqo.h" static void gve_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) @@ -42,7 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { }; static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { - "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]", + "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]", "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]", @@ -50,7 +51,7 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { }; static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { - "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]", + "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]", "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]", "tx_dma_mapping_error[%u]", }; @@ -139,10 +140,11 @@ static void gve_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { - u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail, - tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes; + u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, + tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt, + tmp_tx_pkts, tmp_tx_bytes; u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts, - rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes; + rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped; int stats_idx, base_stats_idx, max_stats_idx; struct stats *report_stats; int *rx_qid_to_stats_idx; @@ -191,7 +193,7 @@ gve_get_ethtool_stats(struct net_device *netdev, rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt; } } - for (tx_pkts = 0, tx_bytes = 0, ring = 0; + for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; ring < priv->tx_cfg.num_queues; ring++) { if (priv->tx) { do { @@ -203,6 +205,7 @@ gve_get_ethtool_stats(struct net_device *netdev, start)); tx_pkts += tmp_tx_pkts; tx_bytes += tmp_tx_bytes; + tx_dropped += priv->tx[ring].dropped_pkt; } } @@ -214,9 +217,7 @@ gve_get_ethtool_stats(struct net_device *netdev, /* total rx dropped packets */ data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail + rx_desc_err_dropped_pkt; - /* Skip tx_dropped */ - 
i++; - + data[i++] = tx_dropped; data[i++] = priv->tx_timeo_cnt; data[i++] = rx_skb_alloc_fail; data[i++] = rx_buf_alloc_fail; @@ -255,6 +256,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = rx->fill_cnt; data[i++] = rx->cnt; + data[i++] = rx->fill_cnt - rx->cnt; do { start = u64_stats_fetch_begin(&priv->rx[ring].statss); @@ -318,12 +320,14 @@ gve_get_ethtool_stats(struct net_device *netdev, if (gve_is_gqi(priv)) { data[i++] = tx->req; data[i++] = tx->done; + data[i++] = tx->req - tx->done; } else { /* DQO doesn't currently support * posted/completed descriptor counts; */ data[i++] = 0; data[i++] = 0; + data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head; } do { start = @@ -419,7 +423,9 @@ static int gve_set_channels(struct net_device *netdev, } static void gve_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *cmd) + struct ethtool_ringparam *cmd, + struct kernel_ethtool_ringparam *kernel_cmd, + struct netlink_ext_ack *extack) { struct gve_priv *priv = netdev_priv(netdev); @@ -535,7 +541,65 @@ static int gve_get_link_ksettings(struct net_device *netdev, return err; } +static int gve_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(netdev); + + if (gve_is_gqi(priv)) + return -EOPNOTSUPP; + ec->tx_coalesce_usecs = priv->tx_coalesce_usecs; + ec->rx_coalesce_usecs = priv->rx_coalesce_usecs; + + return 0; +} + +static int gve_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_ec, + struct netlink_ext_ack *extack) +{ + struct gve_priv *priv = netdev_priv(netdev); + u32 tx_usecs_orig = priv->tx_coalesce_usecs; + u32 rx_usecs_orig = priv->rx_coalesce_usecs; + int idx; + + if (gve_is_gqi(priv)) + return -EOPNOTSUPP; + + if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO || + ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO) + return -EINVAL; + priv->tx_coalesce_usecs = ec->tx_coalesce_usecs; + priv->rx_coalesce_usecs = ec->rx_coalesce_usecs; + + if (tx_usecs_orig != priv->tx_coalesce_usecs) { + for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { + int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->tx_coalesce_usecs); + } + } + + if (rx_usecs_orig != priv->rx_coalesce_usecs) { + for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { + int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); + struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->rx_coalesce_usecs); + } + } + + return 0; +} + const struct ethtool_ops gve_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .get_drvinfo = gve_get_drvinfo, .get_strings = gve_get_strings, .get_sset_count = gve_get_sset_count, @@ -545,6 +609,8 @@ const struct ethtool_ops gve_ethtool_ops = { .set_channels = gve_set_channels, .get_channels = gve_get_channels, .get_link = ethtool_op_get_link, + .get_coalesce = gve_get_coalesce, + .set_coalesce = gve_set_coalesce, .get_ringparam = gve_get_ringparam, .reset = gve_user_reset, .get_tunable = gve_get_tunable, diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 59b66f679e46..f7f65c4bf993 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -334,15 +334,23 @@ static int 
gve_alloc_notify_blocks(struct gve_priv *priv) dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); goto abort_with_msix_enabled; } - priv->ntfy_blocks = + priv->irq_db_indices = dma_alloc_coherent(&priv->pdev->dev, priv->num_ntfy_blks * - sizeof(*priv->ntfy_blocks), - &priv->ntfy_block_bus, GFP_KERNEL); - if (!priv->ntfy_blocks) { + sizeof(*priv->irq_db_indices), + &priv->irq_db_indices_bus, GFP_KERNEL); + if (!priv->irq_db_indices) { err = -ENOMEM; goto abort_with_mgmt_vector; } + + priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks * + sizeof(*priv->ntfy_blocks), GFP_KERNEL); + if (!priv->ntfy_blocks) { + err = -ENOMEM; + goto abort_with_irq_db_indices; + } + /* Setup the other blocks - the first n-1 vectors */ for (i = 0; i < priv->num_ntfy_blks; i++) { struct gve_notify_block *block = &priv->ntfy_blocks[i]; @@ -361,6 +369,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) } irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, get_cpu_mask(i % active_cpus)); + block->irq_db_index = &priv->irq_db_indices[i].index; } return 0; abort_with_some_ntfy_blocks: @@ -372,10 +381,13 @@ abort_with_some_ntfy_blocks: NULL); free_irq(priv->msix_vectors[msix_idx].vector, block); } - dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * - sizeof(*priv->ntfy_blocks), - priv->ntfy_blocks, priv->ntfy_block_bus); + kvfree(priv->ntfy_blocks); priv->ntfy_blocks = NULL; +abort_with_irq_db_indices: + dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * + sizeof(*priv->irq_db_indices), + priv->irq_db_indices, priv->irq_db_indices_bus); + priv->irq_db_indices = NULL; abort_with_mgmt_vector: free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); abort_with_msix_enabled: @@ -403,10 +415,12 @@ static void gve_free_notify_blocks(struct gve_priv *priv) free_irq(priv->msix_vectors[msix_idx].vector, block); } free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); - dma_free_coherent(&priv->pdev->dev, - priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks), - priv->ntfy_blocks, priv->ntfy_block_bus); + kvfree(priv->ntfy_blocks); priv->ntfy_blocks = NULL; + dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * + sizeof(*priv->irq_db_indices), + priv->irq_db_indices, priv->irq_db_indices_bus); + priv->irq_db_indices = NULL; pci_disable_msix(priv->pdev); kvfree(priv->msix_vectors); priv->msix_vectors = NULL; @@ -428,7 +442,7 @@ static int gve_setup_device_resources(struct gve_priv *priv) err = gve_adminq_configure_device_resources(priv, priv->counter_array_bus, priv->num_event_counters, - priv->ntfy_block_bus, + priv->irq_db_indices_bus, priv->num_ntfy_blks); if (unlikely(err)) { dev_err(&priv->pdev->dev, @@ -817,8 +831,7 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, put_page(page); } -static void gve_free_queue_page_list(struct gve_priv *priv, - int id) +static void gve_free_queue_page_list(struct gve_priv *priv, u32 id) { struct gve_queue_page_list *qpl = &priv->qpls[id]; int i; @@ -1100,9 +1113,8 @@ static void gve_turnup(struct gve_priv *priv) if (gve_is_gqi(priv)) { iowrite32be(0, gve_irq_doorbell(priv, block)); } else { - u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO); - - gve_write_irq_doorbell_dqo(priv, block, val); + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->tx_coalesce_usecs); } } for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { @@ -1113,9 +1125,8 @@ static void gve_turnup(struct gve_priv *priv) if (gve_is_gqi(priv)) { iowrite32be(0, gve_irq_doorbell(priv, block)); } else { - u32 val = 
gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO); - - gve_write_irq_doorbell_dqo(priv, block, val); + gve_set_itr_coalesce_usecs_dqo(priv, block, + priv->rx_coalesce_usecs); } } @@ -1412,6 +1423,11 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); + if (!gve_is_gqi(priv)) { + priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; + priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; + } + setup_device: err = gve_setup_device_resources(priv); if (!err) @@ -1663,6 +1679,58 @@ static void gve_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static void gve_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct gve_priv *priv = netdev_priv(netdev); + bool was_up = netif_carrier_ok(priv->dev); + + rtnl_lock(); + if (was_up && gve_close(priv->dev)) { + /* If the dev was up, attempt to close, if close fails, reset */ + gve_reset_and_teardown(priv, was_up); + } else { + /* If the dev wasn't up or close worked, finish tearing down */ + gve_teardown_priv_resources(priv); + } + rtnl_unlock(); +} + +#ifdef CONFIG_PM +static int gve_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct gve_priv *priv = netdev_priv(netdev); + bool was_up = netif_carrier_ok(priv->dev); + + priv->suspend_cnt++; + rtnl_lock(); + if (was_up && gve_close(priv->dev)) { + /* If the dev was up, attempt to close, if close fails, reset */ + gve_reset_and_teardown(priv, was_up); + } else { + /* If the dev wasn't up or close worked, finish tearing down */ + gve_teardown_priv_resources(priv); + } + priv->up_before_suspend = was_up; + rtnl_unlock(); + return 0; +} + +static int gve_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct gve_priv *priv = netdev_priv(netdev); + int err; + + priv->resume_cnt++; + rtnl_lock(); + err = gve_reset_recovery(priv, priv->up_before_suspend); + rtnl_unlock(); + return err; +} +#endif /* CONFIG_PM */ + static const struct pci_device_id gve_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) }, { } @@ -1673,6 +1741,11 @@ static struct pci_driver gvnic_driver = { .id_table = gve_id_table, .probe = gve_probe, .remove = gve_remove, + .shutdown = gve_shutdown, +#ifdef CONFIG_PM + .suspend = gve_suspend, + .resume = gve_resume, +#endif }; module_pci_driver(gvnic_driver); diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 3d04b5aff331..9ddcc497f48e 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -639,8 +639,6 @@ bool gve_rx_work_pending(struct gve_rx_ring *rx) desc = rx->desc.desc_ring + next_idx; flags_seq = desc->flags_seq; - /* Make sure we have synchronized the seq no with the device */ - smp_rmb(); return (GVE_SEQNO(flags_seq) == rx->desc.seqno); } diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index a9cb241fedf4..4888bf05fbed 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -296,11 +296,14 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, return bytes; } -/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag, - * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor, - * and +1 if the payload 
wraps to the beginning of the FIFO. +/* The most descriptors we could need is MAX_SKB_FRAGS + 4 : + * 1 for each skb frag + * 1 for the skb linear portion + * 1 for when tcp hdr needs to be in separate descriptor + * 1 if the payload wraps to the beginning of the FIFO + * 1 for metadata descriptor */ -#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3) +#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4) static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info) { if (info->skb) { @@ -395,6 +398,19 @@ static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc, pkt_desc->pkt.seg_addr = cpu_to_be64(addr); } +static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc, + struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt)); + + mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH; + mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT | + GVE_MTD_PATH_HASH_L4; + mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash); + mtd_desc->mtd.reserved0 = 0; + mtd_desc->mtd.reserved1 = 0; +} + static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc, struct sk_buff *skb, bool is_gso, u16 len, u64 addr) @@ -426,6 +442,7 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset; union gve_tx_desc *pkt_desc, *seg_desc; struct gve_tx_buffer_state *info; + int mtd_desc_nr = !!skb->l4_hash; bool is_gso = skb_is_gso(skb); u32 idx = tx->req & tx->mask; int payload_iov = 2; @@ -457,7 +474,7 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st &info->iov[payload_iov]); gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, - 1 + payload_nfrags, hlen, + 1 + mtd_desc_nr + payload_nfrags, hlen, info->iov[hdr_nfrags - 1].iov_offset); skb_copy_bits(skb, 0, @@ -468,8 +485,13 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st info->iov[hdr_nfrags - 1].iov_len); copy_offset = hlen; + if (mtd_desc_nr) { + next_idx = (tx->req + 1) & tx->mask; + gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); + } + for (i = payload_iov; i < payload_nfrags + payload_iov; i++) { - next_idx = (tx->req + 1 + i - payload_iov) & tx->mask; + next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; seg_desc = &tx->desc[next_idx]; gve_tx_fill_seg_desc(seg_desc, skb, is_gso, @@ -485,16 +507,17 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st copy_offset += info->iov[i].iov_len; } - return 1 + payload_nfrags; + return 1 + mtd_desc_nr + payload_nfrags; } static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) { const struct skb_shared_info *shinfo = skb_shinfo(skb); - int hlen, payload_nfrags, l4_hdr_offset; - union gve_tx_desc *pkt_desc, *seg_desc; + int hlen, num_descriptors, l4_hdr_offset; + union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc; struct gve_tx_buffer_state *info; + int mtd_desc_nr = !!skb->l4_hash; bool is_gso = skb_is_gso(skb); u32 idx = tx->req & tx->mask; u64 addr; @@ -523,23 +546,30 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, dma_unmap_len_set(info, len, len); dma_unmap_addr_set(info, dma, addr); - payload_nfrags = shinfo->nr_frags; + num_descriptors = 1 + shinfo->nr_frags; + if (hlen < len) + num_descriptors++; + if (mtd_desc_nr) + num_descriptors++; + + gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, + num_descriptors, hlen, addr); + + if (mtd_desc_nr) { + idx = (idx + 1) & 
tx->mask; + mtd_desc = &tx->desc[idx]; + gve_tx_fill_mtd_desc(mtd_desc, skb); + } + if (hlen < len) { /* For gso the rest of the linear portion of the skb needs to * be in its own descriptor. */ - payload_nfrags++; - gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, - 1 + payload_nfrags, hlen, addr); - len -= hlen; addr += hlen; - idx = (tx->req + 1) & tx->mask; + idx = (idx + 1) & tx->mask; seg_desc = &tx->desc[idx]; gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); - } else { - gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, - 1 + payload_nfrags, hlen, addr); } for (i = 0; i < shinfo->nr_frags; i++) { @@ -560,11 +590,14 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); } - return 1 + payload_nfrags; + return num_descriptors; unmap_drop: - i += (payload_nfrags == shinfo->nr_frags ? 1 : 2); + i += num_descriptors - shinfo->nr_frags; while (i--) { + /* Skip metadata descriptor, if set */ + if (i == 1 && mtd_desc_nr == 1) + continue; idx--; gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); } diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 88ca49cbc1e2..d57508bc4307 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1; } else { skb = napi_alloc_skb(napi, len); + + if (unlikely(!skb)) + return NULL; set_protocol = true; } __skb_put(skb, len); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 23d9cbf262c3..740850b64aff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -400,6 +400,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, return; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { + /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8. 
+ We need a check to prevent array overflow */ + if (port >= DSAF_MAX_PORT_NUM) + return; reg_val_1 = 0x1 << port; port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is a difference between V1 and V2 in the register. */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index ab7390225942..d7a27c244d48 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -663,9 +663,13 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, * hns_get_ringparam - get ring parameter * @net_dev: net device * @param: ethtool parameter + * @kernel_param: ethtool external parameter + * @extack: netlink extended ACK report struct */ static void hns_get_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 7aa2fac76c5e..6efea4662858 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -4,9 +4,9 @@ # ccflags-y += -I$(srctree)/$(src) - -obj-$(CONFIG_HNS3) += hns3pf/ -obj-$(CONFIG_HNS3) += hns3vf/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf +ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf +ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common obj-$(CONFIG_HNS3) += hnae3.o @@ -14,3 +14,16 @@ obj-$(CONFIG_HNS3_ENET) += hns3.o hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o + +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o + +hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \ + hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \ + hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ + hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o + + +hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index c2bd2584201f..b668df6193be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -80,6 +80,9 @@ enum hclge_mbx_tbl_cfg_subcode { #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U #define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4 +#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ) +#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2) + struct hclge_ring_chain_param { u8 ring_type; u8 tqp_index; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 3f7a9a4c59d5..9298fbecb31a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -839,6 +839,8 @@ struct hnae3_handle { u8 netdev_flags; struct dentry *hnae3_dbgfs; + /* serializes concurrent debugfs commands */ + struct mutex dbgfs_lock; /* Network interface message level enabled bits */ u32 msg_enable; @@ -859,6 +861,20 @@ struct hnae3_handle { #define hnae3_get_bit(origin, shift) \ hnae3_get_field(origin, 0x1 << (shift), shift)
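The hnae3.h hunk continues below with a small logging helper: rather than printing full station addresses into the kernel log, the driver now redacts bytes 1-3 of the MAC. A standalone rendition of the same formatting (names shortened here; the real helper is the hnae3_format_mac_addr() added just below):

#include <stdio.h>

#define FORMAT_MAC_ADDR_LEN 18  /* "xx:**:**:**:xx:xx" is 17 chars plus NUL */

/* Userspace rendition of the masking done by hnae3_format_mac_addr(). */
static void format_mac_addr(char *buf, const unsigned char *mac)
{
        snprintf(buf, FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x",
                 mac[0], mac[4], mac[5]);
}

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        char buf[FORMAT_MAC_ADDR_LEN];

        format_mac_addr(buf, mac);
        printf("%s\n", buf);    /* prints 00:**:**:**:44:55 */
        return 0;
}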
+#define HNAE3_FORMAT_MAC_ADDR_LEN 18 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5 + +static inline void hnae3_format_mac_addr(char *format_mac_addr, + const u8 *mac_addr) +{ + snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x", + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]); +} + int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c new file mode 100644 index 000000000000..c15ca710dabb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -0,0 +1,610 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2021-2021 Hisilicon Limited. + +#include "hnae3.h" +#include "hclge_comm_cmd.h" + +static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw, + struct hclge_comm_cmq_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + u32 reg_val; + + if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) { + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + lower_32_bits(dma)); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + upper_32_bits(dma)); + reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); + reg_val &= HCLGE_COMM_NIC_SW_RST_RDY; + reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S; + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0); + } else { + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + lower_32_bits(dma)); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + upper_32_bits(dma)); + reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S; + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0); + hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0); + } +} + +void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw) +{ + hclge_comm_cmd_config_regs(hw, &hw->cmq.csq); + hclge_comm_cmd_config_regs(hw, &hw->cmq.crq); +} + +void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) +{ + desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR | + HCLGE_COMM_CMD_FLAG_IN); + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR); +} + +static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev, + bool is_pf) +{ + set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps); + set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps); + if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { + set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); + set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); + } +} + +void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, + bool is_read) +{ + memset((void *)desc, 0, sizeof(struct hclge_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR | + HCLGE_COMM_CMD_FLAG_IN); + + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); +} + +int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, bool en) +{ + struct hclge_comm_firmware_compat_cmd *req; + struct 
hclge_desc desc; + u32 compat = 0; + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false); + + if (en) { + req = (struct hclge_comm_firmware_compat_cmd *)desc.data; + + hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1); + hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1); + if (hclge_comm_dev_phy_imp_supported(ae_dev)) + hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1); + hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1); + hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1); + + req->compat = cpu_to_le32(compat); + } + + return hclge_comm_cmd_send(hw, &desc, 1); +} + +void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclge_desc); + + if (!ring->desc) + return; + + dma_free_coherent(&ring->pdev->dev, size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; +} + +static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclge_desc); + + ring->desc = dma_alloc_coherent(&ring->pdev->dev, + size, &ring->desc_dma_addr, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +static __le32 hclge_comm_build_api_caps(void) +{ + u32 api_caps = 0; + + hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1); + + return cpu_to_le32(api_caps); +} + +static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { + {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B}, + {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B}, + {HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B}, + {HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B}, + {HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B}, + {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, + {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, + {HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B}, + {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, + HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, + {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, +}; + +static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { + {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, + {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, + {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, +}; + +static void +hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf, + struct hclge_comm_query_version_cmd *cmd) +{ + const struct hclge_comm_caps_bit_map *caps_map = + is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps; + u32 size = is_pf ? 
ARRAY_SIZE(hclge_pf_cmd_caps) : + ARRAY_SIZE(hclge_vf_cmd_caps); + u32 caps, i; + + caps = __le32_to_cpu(cmd->caps[0]); + for (i = 0; i < size; i++) + if (hnae3_get_bit(caps, caps_map[i].imp_bit)) + set_bit(caps_map[i].local_bit, ae_dev->caps); +} + +int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type) +{ + struct hclge_comm_cmq_ring *ring = + (ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq : + &hw->cmq.crq; + int ret; + + ring->ring_type = ring_type; + + ret = hclge_comm_alloc_cmd_desc(ring); + if (ret) + dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n", + (ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ", + ret); + + return ret; +} + +int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf) +{ + struct hclge_comm_query_version_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); + resp = (struct hclge_comm_query_version_cmd *)desc.data; + resp->api_caps = hclge_comm_build_api_caps(); + + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) + return ret; + + *fw_version = le32_to_cpu(resp->firmware); + + ae_dev->dev_version = le32_to_cpu(resp->hardware) << + HNAE3_PCI_REVISION_BIT_SIZE; + ae_dev->dev_version |= ae_dev->pdev->revision; + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + hclge_comm_set_default_capability(ae_dev, is_pf); + + hclge_comm_parse_capability(ae_dev, is_pf, resp); + + return ret; +} + +static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + HCLGE_QUERY_ALL_ERR_INFO }; + +static bool hclge_comm_is_special_opcode(u16 opcode) +{ + /* these commands have several descriptors, + * and use the first one to save opcode and return value + */ + u32 i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) + if (spec_opcode[i] == opcode) + return true; + + return false; +} + +static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + int used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw, + struct hclge_desc *desc, int num) +{ + struct hclge_desc *desc_to_use; + int handle = 0; + + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } +} + +static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring, + int head) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +}
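Both hclge_comm_ring_space() and hclge_comm_is_valid_csq_clean_head() above are modular arithmetic on a circular descriptor ring that keeps one slot unused, so a full ring and an empty ring stay distinguishable; the head check must also accept the wrapped case where next_to_use has lapped the end of the ring. A standalone sketch with illustrative numbers:

#include <stdio.h>

/* Free slots on a ring of desc_num entries; one slot always stays unused. */
static int ring_space(int ntc, int ntu, int desc_num)
{
        int used = (ntu - ntc + desc_num) % desc_num;

        return desc_num - used - 1;
}

/* A hardware head is sane only inside the [ntc, ntu] window, wrapped or not. */
static int head_is_valid(int ntc, int ntu, int head)
{
        if (ntu > ntc)
                return head >= ntc && head <= ntu;

        return head >= ntc || head <= ntu;
}

int main(void)
{
        printf("space = %d\n", ring_space(2, 250, 256));        /* 7 */
        printf("valid = %d\n", head_is_valid(250, 4, 252));     /* 1: inside wrapped window */
        printf("valid = %d\n", head_is_valid(250, 4, 100));     /* 0: outside the window */
        return 0;
}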
dev_warn(&hw->cmq.csq.pdev->dev,
+			 "Disabling any further commands to IMP firmware\n");
+		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+		dev_warn(&hw->cmq.csq.pdev->dev,
+			 "IMP firmware watchdog reset soon expected!\n");
+		return -EIO;
+	}
+
+	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+	csq->next_to_clean = head;
+	return clean;
+}
+
+static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
+{
+	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+	return head == hw->cmq.csq.next_to_use;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+				     bool *is_completed)
+{
+	u32 timeout = 0;
+
+	do {
+		if (hclge_comm_cmd_csq_done(hw)) {
+			*is_completed = true;
+			break;
+		}
+		udelay(1);
+		timeout++;
+	} while (timeout < hw->cmq.tx_timeout);
+}
+
+static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
+{
+	struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
+		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
+		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
+		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
+		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
+		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
+		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
+		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
+		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
+		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
+		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
+		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
+		{ HCLGE_COMM_CMD_INVALID, -EBADR },
+	};
+	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
+	u32 i;
+
+	for (i = 0; i < errcode_count; i++)
+		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
+			return hclge_comm_cmd_errcode[i].common_errno;
+
+	return -EIO;
+}
+
+static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
+				       struct hclge_desc *desc, int num,
+				       int ntc)
+{
+	u16 opcode, desc_ret;
+	int handle;
+
+	opcode = le16_to_cpu(desc[0].opcode);
+	for (handle = 0; handle < num; handle++) {
+		desc[handle] = hw->cmq.csq.desc[ntc];
+		ntc++;
+		if (ntc >= hw->cmq.csq.desc_num)
+			ntc = 0;
+	}
+	if (likely(!hclge_comm_is_special_opcode(opcode)))
+		desc_ret = le16_to_cpu(desc[num - 1].retval);
+	else
+		desc_ret = le16_to_cpu(desc[0].retval);
+
+	hw->cmq.last_status = desc_ret;
+
+	return hclge_comm_cmd_convert_err_code(desc_ret);
+}
+
+static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
+				       struct hclge_desc *desc,
+				       int num, int ntc)
+{
+	bool is_completed = false;
+	int handle, ret;
+
+	/* If the command is sync, wait for the firmware to write back;
+	 * if multiple descriptors are to be sent, use the first one to check.
+	 */
+	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+		hclge_comm_wait_for_resp(hw, &is_completed);
+
+	if (!is_completed)
+		ret = -EBADE;
+	else
+		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);
+
+	/* Clean the command send queue */
+	handle = hclge_comm_cmd_csq_clean(hw);
+	if (handle < 0)
+		ret = handle;
+	else if (handle != num)
+		dev_warn(&hw->cmq.csq.pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+	return ret;
+}
+
+/**
+ * hclge_comm_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue; it
+ * sends the command to the queue and cleans the queue afterwards.
+ **/
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+			int num)
+{
+	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+	int ret;
+	int ntc;
+
+	spin_lock_bh(&hw->cmq.csq.lock);
+
+	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
+	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
+		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different;
+		 * need to update the SW HEAD pointer csq->next_to_clean.
+		 */
+		csq->next_to_clean =
+			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
+	/* Record the location of desc in the ring for this time,
+	 * which will be used for hardware to write back.
+	 */
+	ntc = hw->cmq.csq.next_to_use;
+
+	hclge_comm_cmd_copy_desc(hw, desc, num);
+
+	/* Write to hardware */
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
+			     hw->cmq.csq.next_to_use);
+
+	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
+
+	spin_unlock_bh(&hw->cmq.csq.lock);
+
+	return ret;
+}
+
+static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
+{
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+}
+
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+			   struct hclge_comm_hw *hw)
+{
+	struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+	hclge_comm_firmware_compat_config(ae_dev, hw, false);
+	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+	/* wait to ensure that the firmware completes the possible left
+	 * over commands.
+ */ + msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME); + spin_lock_bh(&cmdq->csq.lock); + spin_lock(&cmdq->crq.lock); + hclge_comm_cmd_uninit_regs(hw); + spin_unlock(&cmdq->crq.lock); + spin_unlock_bh(&cmdq->csq.lock); + + hclge_comm_free_cmd_desc(&cmdq->csq); + hclge_comm_free_cmd_desc(&cmdq->crq); +} + +int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) +{ + struct hclge_comm_cmq *cmdq = &hw->cmq; + int ret; + + /* Setup the lock for command queue */ + spin_lock_init(&cmdq->csq.lock); + spin_lock_init(&cmdq->crq.lock); + + cmdq->csq.pdev = pdev; + cmdq->crq.pdev = pdev; + + /* Setup the queue entries for use cmd queue */ + cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM; + cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM; + + /* Setup Tx write back timeout */ + cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT; + + /* Setup queue rings */ + ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ); + if (ret) { + dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret); + return ret; + } + + ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ); + if (ret) { + dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret); + goto err_csq; + } + + return 0; +err_csq: + hclge_comm_free_cmd_desc(&hw->cmq.csq); + return ret; +} + +int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf, + unsigned long reset_pending) +{ + struct hclge_comm_cmq *cmdq = &hw->cmq; + int ret; + + spin_lock_bh(&cmdq->csq.lock); + spin_lock(&cmdq->crq.lock); + + cmdq->csq.next_to_clean = 0; + cmdq->csq.next_to_use = 0; + cmdq->crq.next_to_clean = 0; + cmdq->crq.next_to_use = 0; + + hclge_comm_cmd_init_regs(hw); + + spin_unlock(&cmdq->crq.lock); + spin_unlock_bh(&cmdq->csq.lock); + + clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if (reset_pending) { + ret = -EBUSY; + goto err_cmd_init; + } + + /* get version and device capabilities */ + ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw, + fw_version, is_pf); + if (ret) { + dev_err(&ae_dev->pdev->dev, + "failed to query version and capabilities, ret = %d\n", + ret); + goto err_cmd_init; + } + + dev_info(&ae_dev->pdev->dev, + "The firmware version is %lu.%lu.%lu.%lu\n", + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) + return 0; + + /* ask the firmware to enable some features, driver can work without + * it. + */ + ret = hclge_comm_firmware_compat_config(ae_dev, hw, true); + if (ret) + dev_warn(&ae_dev->pdev->dev, + "Firmware compatible features not enabled(%d).\n", + ret); + return 0; + +err_cmd_init: + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h new file mode 100644 index 000000000000..876650eddac4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -0,0 +1,458 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021-2021 Hisilicon Limited. 
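The CSQ bookkeeping in hclge_comm_cmd.c above follows the classic one-slot-reserved circular buffer scheme: hclge_comm_ring_space() keeps one descriptor permanently unused so that next_to_use == next_to_clean unambiguously means "empty" rather than "full", and hclge_comm_cmd_csq_clean() uses the same modular arithmetic to count how many descriptors the firmware has consumed. A minimal standalone sketch of that arithmetic, with hypothetical index values rather than the driver's structures (illustrative only, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Model of hclge_comm_ring_space(): ntu/ntc are the producer and
 * consumer indices into a ring of desc_num slots; the number of
 * occupied slots is (ntu - ntc) mod desc_num, and one slot is always
 * kept free so an empty ring is distinguishable from a full one.
 */
static int ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;
}

int main(void)
{
	/* Empty ring: every slot free except the reserved one. */
	assert(ring_space(0, 0, 1024) == 1023);
	/* Producer wrapped behind the consumer: 1020 slots in flight. */
	assert(ring_space(4, 8, 1024) == 3);
	printf("ring space checks passed\n");
	return 0;
}

The clean path applies the same expression with the hardware HEAD register in place of the producer index: (head - next_to_clean + desc_num) % desc_num descriptors have been retired since the last clean.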
+ +#ifndef __HCLGE_COMM_CMD_H +#define __HCLGE_COMM_CMD_H +#include <linux/types.h> + +#include "hnae3.h" + +#define HCLGE_COMM_CMD_FLAG_IN BIT(0) +#define HCLGE_COMM_CMD_FLAG_NEXT BIT(2) +#define HCLGE_COMM_CMD_FLAG_WR BIT(3) +#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4) + +#define HCLGE_COMM_SEND_SYNC(flag) \ + ((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR) + +#define HCLGE_COMM_LINK_EVENT_REPORT_EN_B 0 +#define HCLGE_COMM_NCSI_ERROR_REPORT_EN_B 1 +#define HCLGE_COMM_PHY_IMP_EN_B 2 +#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3 +#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4 + +#define hclge_comm_dev_phy_imp_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps) + +#define HCLGE_COMM_TYPE_CRQ 0 +#define HCLGE_COMM_TYPE_CSQ 1 + +#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME 200 + +/* bar registers for cmdq */ +#define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_COMM_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGE_COMM_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_COMM_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGE_COMM_NIC_CRQ_HEAD_REG 0x27028 +/* Vector0 interrupt CMDQ event source register(RW) */ +#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG 0x27100 +/* Vector0 interrupt CMDQ event status register(RO) */ +#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG 0x27104 +#define HCLGE_COMM_CMDQ_INTR_EN_REG 0x27108 +#define HCLGE_COMM_CMDQ_INTR_GEN_REG 0x2710C +#define HCLGE_COMM_CMDQ_INTR_STS_REG 0x27104 + +/* this bit indicates that the driver is ready for hardware reset */ +#define HCLGE_COMM_NIC_SW_RST_RDY_B 16 +#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B) +#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3 +#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024 +#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000 + +enum hclge_opcode_type { + /* Generic commands */ + HCLGE_OPC_QUERY_FW_VER = 0x0001, + HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, + HCLGE_OPC_GBL_RST_STATUS = 0x0021, + HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022, + HCLGE_OPC_QUERY_PF_RSRC = 0x0023, + HCLGE_OPC_QUERY_VF_RSRC = 0x0024, + HCLGE_OPC_GET_CFG_PARAM = 0x0025, + HCLGE_OPC_PF_RST_DONE = 0x0026, + HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027, + + HCLGE_OPC_STATS_64_BIT = 0x0030, + HCLGE_OPC_STATS_32_BIT = 0x0031, + HCLGE_OPC_STATS_MAC = 0x0032, + HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033, + HCLGE_OPC_STATS_MAC_ALL = 0x0034, + + HCLGE_OPC_QUERY_REG_NUM = 0x0040, + HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, + HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, + HCLGE_OPC_DFX_BD_NUM = 0x0043, + HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044, + HCLGE_OPC_DFX_SSU_REG_0 = 0x0045, + HCLGE_OPC_DFX_SSU_REG_1 = 0x0046, + HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047, + HCLGE_OPC_DFX_RPU_REG_0 = 0x0048, + HCLGE_OPC_DFX_RPU_REG_1 = 0x0049, + HCLGE_OPC_DFX_NCSI_REG = 0x004A, + HCLGE_OPC_DFX_RTC_REG = 0x004B, + HCLGE_OPC_DFX_PPP_REG = 0x004C, + HCLGE_OPC_DFX_RCB_REG = 0x004D, + HCLGE_OPC_DFX_TQP_REG = 0x004E, + HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, + + HCLGE_OPC_QUERY_DEV_SPECS = 0x0050, + + /* MAC command */ + HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, + HCLGE_OPC_CONFIG_AN_MODE = 0x0304, + HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, + HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, + HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, + HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, + HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, + HCLGE_OPC_COMMON_LOOPBACK = 0x0315, + HCLGE_OPC_CONFIG_FEC_MODE = 0x031A, + 
HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
+
+	/* PTP commands */
+	HCLGE_OPC_PTP_INT_EN = 0x0501,
+	HCLGE_OPC_PTP_MODE_CFG = 0x0507,
+
+	/* PFC/Pause commands */
+	HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
+	HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
+	HCLGE_OPC_CFG_MAC_PARA = 0x0703,
+	HCLGE_OPC_CFG_PFC_PARA = 0x0704,
+	HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
+	HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
+	HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
+	HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
+	HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
+	HCLGE_OPC_QOS_MAP = 0x070A,
+
+	/* ETS/scheduler commands */
+	HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
+	HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
+	HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
+	HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
+	HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
+	HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
+	HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
+	HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
+	HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
+	HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
+	HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
+	HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
+	HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
+	HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
+	HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
+	HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
+	HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+	HCLGE_OPC_TM_NODES = 0x0816,
+	HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+	HCLGE_OPC_QSET_DFX_STS = 0x0844,
+	HCLGE_OPC_PRI_DFX_STS = 0x0845,
+	HCLGE_OPC_PG_DFX_STS = 0x0846,
+	HCLGE_OPC_PORT_DFX_STS = 0x0847,
+	HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+	HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+	HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+	HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+	HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+
+	/* Packet buffer allocate commands */
+	HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
+	HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
+	HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
+	HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
+	HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
+	HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
+
+	/* TQP management command */
+	HCLGE_OPC_SET_TQP_MAP = 0x0A01,
+
+	/* TQP commands */
+	HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
+	HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
+	HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
+	HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
+	HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
+	HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
+	HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
+	HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
+	HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
+	HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+	HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+
+	/* PPU commands */
+	HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
+	/* TSO command */
+	HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+	HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+
+	/* RSS commands */
+	HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+	HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
+	HCLGE_OPC_RSS_TC_MODE = 0x0D08,
+	HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
+
+	/* Promiscuous mode command */
+	HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+
+	/* Vlan offload commands */
+	HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+	HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
+	/* Interrupt commands */
+	HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
+	HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
+	/* MAC commands */
+	HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
+	HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
+	HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
+	HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+	HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
+	HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
+	HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+
+	/* MAC VLAN commands */
+	HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
+	/* VLAN commands */
+	HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
+	HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
+	HCLGE_OPC_VLAN_FILTER_VF_CFG
= 0x1102, + HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103, + + /* Flow Director commands */ + HCLGE_OPC_FD_MODE_CTRL = 0x1200, + HCLGE_OPC_FD_GET_ALLOCATION = 0x1201, + HCLGE_OPC_FD_KEY_CONFIG = 0x1202, + HCLGE_OPC_FD_TCAM_OP = 0x1203, + HCLGE_OPC_FD_AD_OP = 0x1204, + HCLGE_OPC_FD_CNT_OP = 0x1205, + HCLGE_OPC_FD_USER_DEF_OP = 0x1207, + HCLGE_OPC_FD_QB_CTRL = 0x1210, + HCLGE_OPC_FD_QB_AD_OP = 0x1211, + + /* MDIO command */ + HCLGE_OPC_MDIO_CONFIG = 0x1900, + + /* QCN commands */ + HCLGE_OPC_QCN_MOD_CFG = 0x1A01, + HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, + HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03, + HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, + HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, + HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, + HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, + HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, + + /* Mailbox command */ + HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, + HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001, + + /* Led command */ + HCLGE_OPC_LED_STATUS_CFG = 0xB000, + + /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + + /* NCL config command */ + HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, + + /* IMP stats command */ + HCLGE_OPC_IMP_STATS_BD = 0x7012, + HCLGE_OPC_IMP_STATS_INFO = 0x7013, + HCLGE_OPC_IMP_COMPAT_CFG = 0x701A, + + /* SFP command */ + HCLGE_OPC_GET_SFP_EEPROM = 0x7100, + HCLGE_OPC_GET_SFP_EXIST = 0x7101, + HCLGE_OPC_GET_SFP_INFO = 0x7104, + + /* Error INT commands */ + HCLGE_MAC_COMMON_INT_EN = 0x030E, + HCLGE_TM_SCH_ECC_INT_EN = 0x0829, + HCLGE_SSU_ECC_INT_CMD = 0x0989, + HCLGE_SSU_COMMON_INT_CMD = 0x098C, + HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40, + HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41, + HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42, + HCLGE_COMMON_ECC_INT_CFG = 0x1505, + HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510, + HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511, + HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512, + HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515, + HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516, + HCLGE_QUERY_ALL_ERR_INFO = 0x1517, + HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580, + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581, + HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584, + HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585, + HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586, + HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, + HCLGE_IGU_COMMON_INT_EN = 0x1806, + HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, + HCLGE_PPP_CMD0_INT_CMD = 0x2100, + HCLGE_PPP_CMD1_INT_CMD = 0x2101, + HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, + HCLGE_NCSI_INT_EN = 0x2401, + + /* ROH MAC commands */ + HCLGE_OPC_MAC_ADDR_CHECK = 0x9004, + + /* PHY command */ + HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, + HCLGE_OPC_PHY_REG = 0x7026, + + /* Query link diagnosis info command */ + HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, +}; + +enum hclge_comm_cmd_return_status { + HCLGE_COMM_CMD_EXEC_SUCCESS = 0, + HCLGE_COMM_CMD_NO_AUTH = 1, + HCLGE_COMM_CMD_NOT_SUPPORTED = 2, + HCLGE_COMM_CMD_QUEUE_FULL = 3, + HCLGE_COMM_CMD_NEXT_ERR = 4, + HCLGE_COMM_CMD_UNEXE_ERR = 5, + HCLGE_COMM_CMD_PARA_ERR = 6, + HCLGE_COMM_CMD_RESULT_ERR = 7, + HCLGE_COMM_CMD_TIMEOUT = 8, + HCLGE_COMM_CMD_HILINK_ERR = 9, + HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10, + HCLGE_COMM_CMD_INVALID = 11, +}; + +enum HCLGE_COMM_CAP_BITS { + HCLGE_COMM_CAP_UDP_GSO_B, + HCLGE_COMM_CAP_QB_B, + HCLGE_COMM_CAP_FD_FORWARD_TC_B, + HCLGE_COMM_CAP_PTP_B, + HCLGE_COMM_CAP_INT_QL_B, + HCLGE_COMM_CAP_HW_TX_CSUM_B, + HCLGE_COMM_CAP_TX_PUSH_B, + HCLGE_COMM_CAP_PHY_IMP_B, + HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, + HCLGE_COMM_CAP_HW_PAD_B, + HCLGE_COMM_CAP_STASH_B, + HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, 
+ HCLGE_COMM_CAP_RAS_IMP_B = 12, + HCLGE_COMM_CAP_FEC_B = 13, + HCLGE_COMM_CAP_PAUSE_B = 14, + HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15, + HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17, +}; + +enum HCLGE_COMM_API_CAP_BITS { + HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, +}; + +/* capabilities bits map between imp firmware and local driver */ +struct hclge_comm_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + +struct hclge_comm_firmware_compat_cmd { + __le32 compat; + u8 rsv[20]; +}; + +enum hclge_comm_cmd_state { + HCLGE_COMM_STATE_CMD_DISABLE, +}; + +struct hclge_comm_errcode { + u32 imp_errcode; + int common_errno; +}; + +#define HCLGE_COMM_QUERY_CAP_LENGTH 3 +struct hclge_comm_query_version_cmd { + __le32 firmware; + __le32 hardware; + __le32 api_caps; + __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */ +}; + +#define HCLGE_DESC_DATA_LEN 6 +struct hclge_desc { + __le16 opcode; + __le16 flag; + __le16 retval; + __le16 rsv; + __le32 data[HCLGE_DESC_DATA_LEN]; +}; + +struct hclge_comm_cmq_ring { + dma_addr_t desc_dma_addr; + struct hclge_desc *desc; + struct pci_dev *pdev; + u32 head; + u32 tail; + + u16 buf_size; + u16 desc_num; + int next_to_use; + int next_to_clean; + u8 ring_type; /* cmq ring type */ + spinlock_t lock; /* Command queue lock */ +}; + +enum hclge_comm_cmd_status { + HCLGE_COMM_STATUS_SUCCESS = 0, + HCLGE_COMM_ERR_CSQ_FULL = -1, + HCLGE_COMM_ERR_CSQ_TIMEOUT = -2, + HCLGE_COMM_ERR_CSQ_ERROR = -3, +}; + +struct hclge_comm_cmq { + struct hclge_comm_cmq_ring csq; + struct hclge_comm_cmq_ring crq; + u16 tx_timeout; + enum hclge_comm_cmd_status last_status; +}; + +struct hclge_comm_hw { + void __iomem *io_base; + void __iomem *mem_base; + struct hclge_comm_cmq cmq; + unsigned long comm_state; +}; + +static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value) +{ + writel(value, base + reg); +} + +static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + return readl(reg_addr + reg); +} + +#define hclge_comm_write_dev(a, reg, value) \ + hclge_comm_write_reg((a)->io_base, reg, value) +#define hclge_comm_read_dev(a, reg) \ + hclge_comm_read_reg((a)->io_base, reg) + +void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw); +int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf); +int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type); +int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num); +void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read); +int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, bool en); +void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring); +void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, + bool is_read); +void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw); +int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw); +int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, + u32 *fw_version, bool is_pf, + unsigned long reset_pending); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c new file mode 100644 index 000000000000..e23729ac3bb8 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c @@ -0,0 +1,525 @@ +// SPDX-License-Identifier: 
GPL-2.0+ +// Copyright (c) 2021-2021 Hisilicon Limited. +#include <linux/skbuff.h> + +#include "hnae3.h" +#include "hclge_comm_cmd.h" +#include "hclge_comm_rss.h" + +static const u8 hclge_comm_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + +static void +hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg) +{ + rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP; + rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + rss_tuple_cfg->ipv6_sctp_en = + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? + HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT : + HCLGE_COMM_RSS_INPUT_TUPLE_SCTP; + rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; +} + +int hclge_comm_rss_init_cfg(struct hnae3_handle *nic, + struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_cfg *rss_cfg) +{ + u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size; + int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ; + u16 *rss_ind_tbl; + + if (nic->flags & HNAE3_SUPPORT_VF) + rss_cfg->rss_size = nic->kinfo.rss_size; + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE; + + hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets); + + rss_cfg->rss_algo = rss_algo; + + rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size, + sizeof(*rss_ind_tbl), GFP_KERNEL); + if (!rss_ind_tbl) + return -ENOMEM; + + rss_cfg->rss_indirection_tbl = rss_ind_tbl; + memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key, + HCLGE_COMM_RSS_KEY_SIZE); + + hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg); + + return 0; +} + +void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, + u16 *tc_valid, u16 *tc_size) +{ + u16 roundup_size; + u32 i; + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) { + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = (hw_tc_map & BIT(i)) ? 
rss_size * i : 0;
+	}
+}
+
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+			       u16 *tc_valid, u16 *tc_size)
+{
+	struct hclge_comm_rss_tc_mode_cmd *req;
+	struct hclge_desc desc;
+	unsigned int i;
+	int ret;
+
+	req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;
+
+	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
+	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+		u16 mode = 0;
+
+		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
+			      (tc_valid[i] & 0x1));
+		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
+				HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
+		hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
+			      tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
+			      0x1);
+		hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
+				HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);
+
+		req->rss_tc_mode[i] = cpu_to_le16(mode);
+	}
+
+	ret = hclge_comm_cmd_send(hw, &desc, 1);
+	if (ret)
+		dev_err(&hw->cmq.csq.pdev->dev,
+			"failed to set rss tc mode, ret = %d.\n", ret);
+
+	return ret;
+}
+
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+				struct hclge_comm_hw *hw, const u8 *key,
+				const u8 hfunc)
+{
+	u8 hash_algo;
+	int ret;
+
+	ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
+	if (ret)
+		return ret;
+
+	/* Set the RSS Hash Key if specified by the user */
+	if (key) {
+		ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
+		if (ret)
+			return ret;
+
+		/* Update the shadow RSS key with the user-specified key */
+		memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
+	} else {
+		ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
+						  rss_cfg->rss_hash_key);
+		if (ret)
+			return ret;
+	}
+	rss_cfg->rss_algo = hash_algo;
+
+	return 0;
+}
+
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+			     struct hclge_comm_hw *hw,
+			     struct hclge_comm_rss_cfg *rss_cfg,
+			     struct ethtool_rxnfc *nfc)
+{
+	struct hclge_comm_rss_input_tuple_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	if (nfc->data &
+	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+					false);
+
+	ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
+	if (ret) {
+		dev_err(&hw->cmq.csq.pdev->dev,
+			"failed to init rss tuple cmd, ret = %d.\n", ret);
+		return ret;
+	}
+
+	ret = hclge_comm_cmd_send(hw, &desc, 1);
+	if (ret) {
+		dev_err(&hw->cmq.csq.pdev->dev,
+			"failed to set rss tuple, ret = %d.\n", ret);
+		return ret;
+	}
+
+	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+	return 0;
+}
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
+{
+	return HCLGE_COMM_RSS_KEY_SIZE;
+}
+
+void hclge_comm_get_rss_type(struct hnae3_handle *nic,
+			     struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
+{
+	if (rss_tuple_sets->ipv4_tcp_en ||
+	    rss_tuple_sets->ipv4_udp_en ||
+	    rss_tuple_sets->ipv4_sctp_en ||
+	    rss_tuple_sets->ipv6_tcp_en ||
+	    rss_tuple_sets->ipv6_udp_en ||
+	    rss_tuple_sets->ipv6_sctp_en)
+		nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
+	else if (rss_tuple_sets->ipv4_fragment_en ||
rss_tuple_sets->ipv6_fragment_en) + nic->kinfo.rss_type = PKT_HASH_TYPE_L3; + else + nic->kinfo.rss_type = PKT_HASH_TYPE_NONE; +} + +int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, + const u8 hfunc, u8 *hash_algo) +{ + switch (hfunc) { + case ETH_RSS_HASH_TOP: + *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ; + return 0; + case ETH_RSS_HASH_XOR: + *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE; + return 0; + case ETH_RSS_HASH_NO_CHANGE: + *hash_algo = rss_cfg->rss_algo; + return 0; + default: + return -EINVAL; + } +} + +void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_cfg *rss_cfg) +{ + u16 i; + /* Initialize RSS indirect table */ + for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) + rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; +} + +int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, + u8 *tuple_sets) +{ + switch (flow_type) { + case TCP_V4_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + break; + case UDP_V4_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; + break; + case TCP_V6_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + break; + case UDP_V6_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; + break; + case SCTP_V4_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + *tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void +hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req, + u16 qid, u32 j) +{ + u8 rss_msb_oft; + u8 rss_msb_val; + + rss_msb_oft = + j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE; + rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) << + (j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE); + req->rss_qid_h[rss_msb_oft] |= rss_msb_val; +} + +int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev, + struct hclge_comm_hw *hw, const u16 *indir) +{ + struct hclge_comm_rss_ind_tbl_cmd *req; + struct hclge_desc desc; + u16 rss_cfg_tbl_num; + int ret; + u16 qid; + u16 i; + u32 j; + + req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data; + rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size / + HCLGE_COMM_RSS_CFG_TBL_SIZE; + + for (i = 0; i < rss_cfg_tbl_num; i++) { + hclge_comm_cmd_setup_basic_desc(&desc, + HCLGE_OPC_RSS_INDIR_TABLE, + false); + + req->start_table_index = + cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE); + req->rss_set_bitmap = + cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK); + for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) { + qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j]; + req->rss_qid_l[j] = qid & 0xff; + hclge_comm_append_rss_msb_info(req, qid, j); + } + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to configure rss table, ret = %d.\n", + ret); + return ret; + } + } + return 0; +} + +int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic, + struct hclge_comm_hw *hw, bool is_pf, + struct hclge_comm_rss_cfg *rss_cfg) +{ + struct hclge_comm_rss_input_tuple_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, + false); + + req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = 
rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + if (is_pf) + hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets); + + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) + dev_err(&hw->cmq.csq.pdev->dev, + "failed to configure rss input, ret = %d.\n", ret); + return ret; +} + +void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, + u8 *hfunc) +{ + /* Get hash algorithm */ + if (hfunc) { + switch (rss_cfg->rss_algo) { + case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } + + /* Get the RSS Key required by the user */ + if (key) + memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE); +} + +void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, + u32 *indir, u16 rss_ind_tbl_size) +{ + u16 i; + + if (!indir) + return; + + for (i = 0; i < rss_ind_tbl_size; i++) + indir[i] = rss_cfg->rss_indirection_tbl[i]; +} + +int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, + const u8 *key) +{ + struct hclge_comm_rss_config_cmd *req; + unsigned int key_offset = 0; + struct hclge_desc desc; + int key_counts; + int key_size; + int ret; + + key_counts = HCLGE_COMM_RSS_KEY_SIZE; + req = (struct hclge_comm_rss_config_cmd *)desc.data; + + while (key_counts) { + hclge_comm_cmd_setup_basic_desc(&desc, + HCLGE_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK); + req->hash_config |= + (key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B); + + key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts); + memcpy(req->hash_key, + key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM, + key_size); + + key_counts -= key_size; + key_offset++; + ret = hclge_comm_cmd_send(hw, &desc, 1); + if (ret) { + dev_err(&hw->cmq.csq.pdev->dev, + "failed to configure RSS key, ret = %d.\n", + ret); + return ret; + } + } + + return 0; +} + +static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc) +{ + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGE_COMM_S_PORT_BIT : 0; + + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGE_COMM_D_PORT_BIT; + else + hash_sets &= ~HCLGE_COMM_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGE_COMM_S_IP_BIT; + else + hash_sets &= ~HCLGE_COMM_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGE_COMM_D_IP_BIT; + else + hash_sets &= ~HCLGE_COMM_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGE_COMM_V_TAG_BIT; + + return hash_sets; +} + +int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg, + struct ethtool_rxnfc *nfc, + struct hnae3_ae_dev *ae_dev, + struct hclge_comm_rss_input_tuple_cmd *req) +{ + u8 tuple_sets; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + tuple_sets = hclge_comm_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && + (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) + return -EINVAL; + + req->ipv6_sctp_en = tuple_sets; + break; + case IPV4_FLOW: + req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } + + return 0; +} + +u64 hclge_comm_convert_rss_tuple(u8 tuple_sets) +{ + u64 tuple_data = 0; + + if (tuple_sets & HCLGE_COMM_D_PORT_BIT) + tuple_data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGE_COMM_S_PORT_BIT) + tuple_data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGE_COMM_D_IP_BIT) + tuple_data |= RXH_IP_DST; + if (tuple_sets & HCLGE_COMM_S_IP_BIT) + tuple_data |= RXH_IP_SRC; + + return tuple_data; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h new file mode 100644 index 000000000000..aa1d7a6ff4ca --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021-2021 Hisilicon Limited. 
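The helpers just above translate in both directions between ethtool's RXH_* flags and the tuple bits programmed into the firmware: hclge_comm_get_rss_hash_bits() packs RXH_* into the source/destination port and IP bits, and hclge_comm_convert_rss_tuple() unpacks them again for reporting. A standalone sketch of that round trip; the RXH_* values are those of include/uapi/linux/ethtool.h and the tuple bits mirror the HCLGE_COMM_*_BIT defines in the header that follows (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* RXH_* flag values as in include/uapi/linux/ethtool.h. */
#define RXH_IP_SRC   (1 << 4)
#define RXH_IP_DST   (1 << 5)
#define RXH_L4_B_0_1 (1 << 6)	/* L4 bytes 0-1: source port */
#define RXH_L4_B_2_3 (1 << 7)	/* L4 bytes 2-3: destination port */

/* Tuple bits, mirroring HCLGE_COMM_*_BIT in hclge_comm_rss.h. */
#define D_PORT_BIT (1 << 0)
#define S_PORT_BIT (1 << 1)
#define D_IP_BIT   (1 << 2)
#define S_IP_BIT   (1 << 3)

/* Pack ethtool flags into hardware tuple bits
 * (cf. hclge_comm_get_rss_hash_bits()).
 */
static uint8_t pack_tuple(uint64_t rxh)
{
	uint8_t bits = 0;

	if (rxh & RXH_L4_B_0_1)
		bits |= S_PORT_BIT;
	if (rxh & RXH_L4_B_2_3)
		bits |= D_PORT_BIT;
	if (rxh & RXH_IP_SRC)
		bits |= S_IP_BIT;
	if (rxh & RXH_IP_DST)
		bits |= D_IP_BIT;
	return bits;
}

/* Unpack hardware tuple bits back to ethtool flags
 * (cf. hclge_comm_convert_rss_tuple()).
 */
static uint64_t unpack_tuple(uint8_t bits)
{
	uint64_t rxh = 0;

	if (bits & D_PORT_BIT)
		rxh |= RXH_L4_B_2_3;
	if (bits & S_PORT_BIT)
		rxh |= RXH_L4_B_0_1;
	if (bits & D_IP_BIT)
		rxh |= RXH_IP_DST;
	if (bits & S_IP_BIT)
		rxh |= RXH_IP_SRC;
	return rxh;
}

int main(void)
{
	/* The usual 4-tuple hash round-trips unchanged. */
	uint64_t rxh = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

	assert(unpack_tuple(pack_tuple(rxh)) == rxh);
	return 0;
}

Keeping the two directions as separate helpers matches the driver's split: the packed form is what lands in the hclge_comm_rss_input_tuple_cmd fields, while the RXH_* form is what ethtool reports back to userspace.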
+
+#ifndef __HCLGE_COMM_RSS_H
+#define __HCLGE_COMM_RSS_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2
+
+#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+
+#define HCLGE_COMM_D_PORT_BIT BIT(0)
+#define HCLGE_COMM_S_PORT_BIT BIT(1)
+#define HCLGE_COMM_D_IP_BIT BIT(2)
+#define HCLGE_COMM_S_IP_BIT BIT(3)
+#define HCLGE_COMM_V_TAG_BIT BIT(4)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+	(HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT)
+#define HCLGE_COMM_MAX_TC_NUM 8
+
+#define HCLGE_COMM_RSS_TC_OFFSET_S 0
+#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11
+#define HCLGE_COMM_RSS_TC_SIZE_S 12
+#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_COMM_RSS_TC_VALID_B 15
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3
+
+struct hclge_comm_rss_tuple_cfg {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_sctp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_sctp_en;
+	u8 ipv6_fragment_en;
+};
+
+#define HCLGE_COMM_RSS_KEY_SIZE 40
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16
+#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U
+#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0)
+#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0)
+#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4
+
+#define HCLGE_COMM_RSS_HASH_KEY_NUM 16
+struct hclge_comm_rss_config_cmd {
+	u8 hash_config;
+	u8 rsv[7];
+	u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM];
+};
+
+struct hclge_comm_rss_cfg {
+	u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user-configured hash keys */
+
+	/* shadow table */
+	u16 *rss_indirection_tbl;
+	u32 rss_algo;
+
+	struct hclge_comm_rss_tuple_cfg rss_tuple_sets;
+	u32 rss_size;
+};
+
+struct hclge_comm_rss_input_tuple_cmd {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_sctp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_sctp_en;
+	u8 ipv6_fragment_en;
+	u8 rsv[16];
+};
+
+struct hclge_comm_rss_ind_tbl_cmd {
+	__le16 start_table_index;
+	__le16 rss_set_bitmap;
+	u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H];
+	u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE];
+};
+
+struct hclge_comm_rss_tc_mode_cmd {
+	__le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM];
+	u8 rsv[8];
+};
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
+void hclge_comm_get_rss_type(struct hnae3_handle *nic,
+			     struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+				   struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+			     u8 *tuple_sets);
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+			       const u8 hfunc, u8 *hash_algo);
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+				  u8 *hfunc);
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+				  u32 *indir, u16 rss_ind_tbl_size);
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+				const u8 *key);
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+				  struct ethtool_rxnfc *nfc,
+				  struct hnae3_ae_dev *ae_dev,
+				  struct hclge_comm_rss_input_tuple_cmd *req);
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
+int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
+				   struct hclge_comm_hw *hw, bool is_pf,
+				   struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+				   struct hclge_comm_hw *hw, const u16 *indir);
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+			    struct hnae3_ae_dev *ae_dev,
+			    struct hclge_comm_rss_cfg *rss_cfg);
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+				u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+			       u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+				struct hclge_comm_hw *hw, const u8 *key,
+				const u8 hfunc);
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+			     struct hclge_comm_hw *hw,
+			     struct hclge_comm_rss_cfg *rss_cfg,
+			     struct ethtool_rxnfc *nfc);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
new file mode 100644
index 000000000000..0c60f41fca8a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include <linux/err.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_tqp_stats.h"
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hclge_comm_tqp *tqp;
+	u64 *buff = data;
+	u16 i;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+	}
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+	}
+
+	return buff;
+}
+
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+	return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
+}
+
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	u8 *buff = data;
+	u16 i;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		struct hclge_comm_tqp *tqp =
+			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		struct hclge_comm_tqp *tqp =
+			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	return buff;
+}
+
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+				 struct hclge_comm_hw *hw)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hclge_comm_tqp *tqp;
+	struct hclge_desc desc;
+	int ret;
+	u16 i;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+		hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS,
+						true);
+
+		desc.data[0] = cpu_to_le32(tqp->index);
+		ret = hclge_comm_cmd_send(hw, &desc, 1);
+		if (ret) {
+			dev_err(&hw->cmq.csq.pdev->dev,
+				"failed to get tqp stat, ret = %d, rx = %u.\n",
+				ret, i);
+			return ret;
+		}
+		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[1]);
+
+		hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
+						true);
+
+		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+		ret = hclge_comm_cmd_send(hw, &desc, 1);
+		if (ret) {
+			dev_err(&hw->cmq.csq.pdev->dev,
+				"failed to get tqp stat, ret = %d, tx = %u.\n",
+				ret, i);
+			return ret;
+		}
+		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[1]);
+	}
+
+	return 0;
+}
+
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hclge_comm_tqp *tqp;
+	struct hnae3_queue *queue;
+	u16 i;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		queue = kinfo->tqp[i];
+		tqp = container_of(queue, struct hclge_comm_tqp, q);
+		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+	}
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
new file mode 100644
index 000000000000..a46350162ee8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_TQP_STATS_H
+#define __HCLGE_COMM_TQP_STATS_H
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "hnae3.h"
+
+/* each tqp has TX & RX two queues */
+#define HCLGE_COMM_QUEUE_PAIR_SIZE 2
+
+/* TQP stats */
+struct hclge_comm_tqp_stats {
+	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclge_comm_tqp {
+	/* copy of device pointer from pci_dev,
+	 * used when performing DMA mapping
+	 */
+	struct device *dev;
+	struct hnae3_queue q;
+	struct hclge_comm_tqp_stats tqp_stats;
+	u16 index; /* Global index in a NIC controller */
+
+	bool alloced;
+};
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+				 struct hclge_comm_hw *hw);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 67364ab63a1f..f726a5b70f9e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1081,8 +1081,9 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
 	u32 j = 0;
 
 	sprintf(result[j++], "%u", index);
-	sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
 	sprintf(result[j++], "%u",
+		READ_ONCE(ring->page_pool->pages_state_hold_cnt));
+	sprintf(result[j++], "%d",
 		atomic_read(&ring->page_pool->pages_state_release_cnt));
 	sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
 	sprintf(result[j++], "%u", ring->page_pool->p.order);
@@ -1106,6 +1107,11 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
 		return -EFAULT;
 	}
 
+	if (!priv->ring[h->kinfo.num_tqps].page_pool) {
+		dev_err(&h->pdev->dev, "page pool is not initialized\n");
+		return -EFAULT;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
 		result[i] = &data_str[i][0];
 
@@ -1220,6 +1226,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
 	if (ret)
 		return ret;
 
+	mutex_lock(&handle->dbgfs_lock);
 	save_buf = &hns3_dbg_cmd[index].buf;
 
 	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
@@ -1232,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp,
char __user *buffer, read_buf = *save_buf; } else { read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL); - if (!read_buf) - return -ENOMEM; + if (!read_buf) { + ret = -ENOMEM; + goto out; + } /* save the buffer addr until the last read operation */ *save_buf = read_buf; - } - /* get data ready for the first time to read */ - if (!*ppos) { + /* get data ready for the first time to read */ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, read_buf, hns3_dbg_cmd[index].buf_len); if (ret) @@ -1249,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, size = simple_read_from_buffer(buffer, count, ppos, read_buf, strlen(read_buf)); - if (size > 0) + if (size > 0) { + mutex_unlock(&handle->dbgfs_lock); return size; + } out: /* free the buffer for the last read operation */ @@ -1259,6 +1268,7 @@ out: *save_buf = NULL; } + mutex_unlock(&handle->dbgfs_lock); return ret; } @@ -1331,6 +1341,8 @@ int hns3_dbg_init(struct hnae3_handle *handle) debugfs_create_dir(hns3_dbg_dentry[i].name, handle->hnae3_dbgfs); + mutex_init(&handle->dbgfs_lock); + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES && ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) || @@ -1357,6 +1369,7 @@ int hns3_dbg_init(struct hnae3_handle *handle) return 0; out: + mutex_destroy(&handle->dbgfs_lock); debugfs_remove_recursive(handle->hnae3_dbgfs); handle->hnae3_dbgfs = NULL; return ret; @@ -1372,6 +1385,7 @@ void hns3_dbg_uninit(struct hnae3_handle *handle) hns3_dbg_cmd[i].buf = NULL; } + mutex_destroy(&handle->dbgfs_lock); debugfs_remove_recursive(handle->hnae3_dbgfs); handle->hnae3_dbgfs = NULL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index bd8801065e02..83aa1450ab9f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -4,6 +4,8 @@ #ifndef __HNS3_DEBUGFS_H #define __HNS3_DEBUGFS_H +#include "hnae3.h" + #define HNS3_DBG_READ_LEN 65536 #define HNS3_DBG_READ_LEN_128KB 0x20000 #define HNS3_DBG_READ_LEN_1MB 0x100000 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 9ccebbaa0d69..babc5d7a3b52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -17,6 +17,7 @@ #include <linux/skbuff.h> #include <linux/sctp.h> #include <net/gre.h> +#include <net/gro.h> #include <net/ip6_checksum.h> #include <net/pkt_cls.h> #include <net/tcp.h> @@ -53,10 +54,6 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Network interface message level setting"); -static unsigned int tx_spare_buf_size; -module_param(tx_spare_buf_size, uint, 0400); -MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer"); - static unsigned int tx_sgl = 1; module_param(tx_sgl, uint, 0600); MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); @@ -1005,9 +1002,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, return false; if (ALIGN(len, dma_get_cache_alignment()) > space) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1024,9 +1019,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, return false; if (space < HNS3_MAX_SGL_SIZE) { - 
u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1041,8 +1034,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) dma_addr_t dma; int order; - alloc_size = tx_spare_buf_size ? tx_spare_buf_size : - ring->tqp->handle->kinfo.tx_spare_buf_size; + alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; if (!alloc_size) return; @@ -1306,7 +1298,7 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) if (!(!skb->encapsulation && (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || l4.udp->dest == htons(GENEVE_UDP_PORT) || - l4.udp->dest == htons(4790)))) + l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) return false; return true; @@ -1359,44 +1351,9 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, HNS3_TUN_NVGRE); } -static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) +static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3, + u32 *type_cs_vlan_tso) { - unsigned char *l2_hdr = skb->data; - u32 l4_proto = ol4_proto; - union l4_hdr_info l4; - union l3_hdr_info l3; - u32 l2_len, l3_len; - - l4.hdr = skb_transport_header(skb); - l3.hdr = skb_network_header(skb); - - /* handle encapsulation skb */ - if (skb->encapsulation) { - /* If this is a not UDP/GRE encapsulation skb */ - if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. - */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. - */ - return skb_checksum_help(skb); - } - - hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); - - /* switch to inner header */ - l2_hdr = skb_inner_mac_header(skb); - l3.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - l4_proto = il4_proto; - } - if (l3.v4->version == 4) { hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, HNS3_L3T_IPV4); @@ -1410,15 +1367,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, HNS3_L3T_IPV6); } +} - /* compute inner(/normal) L2 header size, defined in 2 Bytes */ - l2_len = l3.hdr - l2_hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); - - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); - +static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4, + u32 l4_proto, u32 *type_cs_vlan_tso) +{ /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: @@ -1464,6 +1417,57 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, return 0; } +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + unsigned char *l2_hdr = skb->data; + u32 l4_proto = ol4_proto; + union l4_hdr_info l4; + union l3_hdr_info l3; + u32 l2_len, l3_len; + + l4.hdr = skb_transport_header(skb); + l3.hdr = skb_network_header(skb); + + /* handle encapsulation skb */ + if (skb->encapsulation) { + /* If this is a not UDP/GRE encapsulation skb */ + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { + /* drop the skb tunnel packet if hardware don't support, + * because 
hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + return skb_checksum_help(skb); + } + + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); + + /* switch to inner header */ + l2_hdr = skb_inner_mac_header(skb); + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + l4_proto = il4_proto; + } + + hns3_set_l3_type(skb, l3, type_cs_vlan_tso); + + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - l2_hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); + + return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso); +} + static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, struct sk_buff *skb) { @@ -1540,92 +1544,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb) return true; } -static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, - struct sk_buff *skb, struct hns3_desc *desc, - struct hns3_desc_cb *desc_cb) +struct hns3_desc_param { + u32 paylen_ol4cs; + u32 ol_type_vlan_len_msec; + u32 type_cs_vlan_tso; + u16 mss_hw_csum; + u16 inner_vtag; + u16 out_vtag; +}; + +static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) +{ + pa->paylen_ol4cs = skb->len; + pa->ol_type_vlan_len_msec = 0; + pa->type_cs_vlan_tso = 0; + pa->mss_hw_csum = 0; + pa->inner_vtag = 0; + pa->out_vtag = 0; +} + +static int hns3_handle_vlan_info(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_param *param) { - u32 ol_type_vlan_len_msec = 0; - u32 paylen_ol4cs = skb->len; - u32 type_cs_vlan_tso = 0; - u16 mss_hw_csum = 0; - u16 inner_vtag = 0; - u16 out_vtag = 0; int ret; ret = hns3_handle_vtags(ring, skb); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_vlan_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_vlan_err); return ret; } else if (ret == HNS3_INNER_VLAN_TAG) { - inner_vtag = skb_vlan_tag_get(skb); - inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->inner_vtag = skb_vlan_tag_get(skb); + param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); } else if (ret == HNS3_OUTER_VLAN_TAG) { - out_vtag = skb_vlan_tag_get(skb); - out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->out_vtag = skb_vlan_tag_get(skb); + param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, + hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1); } + return 0; +} - desc_cb->send_bytes = skb->len; +static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + struct hns3_desc_param *param) +{ + u8 ol4_proto, il4_proto; + int ret; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 ol4_proto, il4_proto; - - if (hns3_check_hw_tx_csum(skb)) { - /* set checksum start and offset, defined in 2 Bytes */ - hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, - skb_checksum_start_offset(skb) >> 1); - hns3_set_field(ol_type_vlan_len_msec, - HNS3_TXD_CSUM_OFFSET_S, - skb->csum_offset >> 1); - mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); - 
goto out_hw_tx_csum; - } + if (hns3_check_hw_tx_csum(skb)) { + /* set checksum start and offset, defined in 2 Bytes */ + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, + skb_checksum_start_offset(skb) >> 1); + hns3_set_field(param->ol_type_vlan_len_msec, + HNS3_TXD_CSUM_OFFSET_S, + skb->csum_offset >> 1); + param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); + return 0; + } - skb_reset_mac_len(skb); + skb_reset_mac_len(skb); - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l4_proto_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l4_proto_err); + return ret; + } - ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l2l3l4_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + &param->type_cs_vlan_tso, + &param->ol_type_vlan_len_msec); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l2l3l4_err); + return ret; + } + + ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum, + &param->type_cs_vlan_tso, &desc_cb->send_bytes); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_tso_err); + return ret; + } + return 0; +} + +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc_param param; + int ret; + + hns3_init_desc_data(skb, &param); + ret = hns3_handle_vlan_info(ring, skb, &param); + if (unlikely(ret < 0)) + return ret; + + desc_cb->send_bytes = skb->len; - ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, - &type_cs_vlan_tso, &desc_cb->send_bytes); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_tso_err++; - u64_stats_update_end(&ring->syncp); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param); + if (ret) return ret; - } } -out_hw_tx_csum: /* Set txbd */ desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs); - desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum); - desc->tx.vlan_tag = cpu_to_le16(inner_vtag); - desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); + cpu_to_le32(param.ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); + desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); + desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); + desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); return 0; } @@ -1705,9 +1739,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, } if (unlikely(dma_mapping_error(dev, dma))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1853,9 +1885,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, * recursion level of over HNS3_MAX_RECURSION_LEVEL. 
*/ if (bd_num == UINT_MAX) { - u64_stats_update_begin(&ring->syncp); - ring->stats.over_max_recursion++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, over_max_recursion); return -ENOMEM; } @@ -1864,16 +1894,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, */ if (skb->len > HNS3_MAX_TSO_SIZE || (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.hw_limitation++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, hw_limitation); return -ENOMEM; } if (__skb_linearize(skb)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1903,9 +1929,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, bd_num = hns3_tx_bd_count(skb->len); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_copy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_copy); } out: @@ -1925,9 +1949,7 @@ out: return bd_num; } - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_busy); return -EBUSY; } @@ -2012,9 +2034,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, ring->pending_buf += num; if (!doorbell) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_more++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_more); return; } @@ -2064,9 +2084,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, ret = skb_copy_bits(skb, 0, buf, size); if (unlikely(ret < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.copy_bits_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, copy_bits_err); return ret; } @@ -2089,9 +2107,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, dma_sync_single_for_device(ring_to_dev(ring), dma, size, DMA_TO_DEVICE); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_bounce++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_bounce); + return bd_num; } @@ -2121,9 +2138,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); if (unlikely(nents < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.skb2sgl_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, skb2sgl_err); return -ENOMEM; } @@ -2132,9 +2147,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, DMA_TO_DEVICE); if (unlikely(!sgt->nents)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.map_sg_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, map_sg_err); return -ENOMEM; } @@ -2146,10 +2159,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, for (i = 0; i < sgt->nents; i++) bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), sg_dma_len(sgt->sgl + i)); - - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_sgl++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_sgl); return bd_num; } @@ -2174,23 +2184,45 @@ out: return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); } +static int hns3_handle_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + int next_to_use_head) +{ + int ret; + + ret = hns3_fill_skb_desc(ring, skb, 
&ring->desc[ring->next_to_use], + desc_cb); + if (unlikely(ret < 0)) + goto fill_err; + + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. + */ + ret = hns3_handle_desc_filling(ring, skb); + if (likely(ret > 0)) + return ret; + +fill_err: + hns3_clear_desc(ring, next_to_use_head); + return ret; +} + netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct netdev_queue *dev_queue; - int pre_ntu, next_to_use_head; + int pre_ntu, ret; bool doorbell; - int ret; /* Hardware can only handle short frames above 32 bytes */ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return NETDEV_TX_OK; } @@ -2209,20 +2241,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) goto out_err_tx_ok; } - next_to_use_head = ring->next_to_use; - - ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], - desc_cb); - if (unlikely(ret < 0)) - goto fill_err; - - /* 'ret < 0' means filling error, 'ret == 0' means skb->len is - * zero, which is unlikely, and 'ret > 0' means how many tx desc - * need to be notified to the hw. - */ - ret = hns3_handle_desc_filling(ring, skb); + ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); if (unlikely(ret <= 0)) - goto fill_err; + goto out_err_tx_ok; pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : (ring->desc_num - 1); @@ -2244,9 +2265,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; -fill_err: - hns3_clear_desc(ring, next_to_use_head); - out_err_tx_ok: dev_kfree_skb_any(skb); hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); @@ -2255,6 +2273,8 @@ out_err_tx_ok: static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) { + char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; + char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = hns3_get_handle(netdev); struct sockaddr *mac_addr = p; int ret; @@ -2263,8 +2283,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { - netdev_info(netdev, "already using mac address %pM\n", - mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_info(netdev, "already using mac address %s\n", + format_mac_addr_sa); return 0; } @@ -2273,8 +2294,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) */ if (!hns3_is_phys_func(h->pdev) && !is_zero_ether_addr(netdev->perm_addr)) { - netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", - netdev->perm_addr, mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", + format_mac_addr_perm, format_mac_addr_sa); return -EPERM; } @@ -2382,90 +2405,89 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb, return features; } +static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, + 
struct hns3_enet_ring *ring, bool is_tx) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + if (is_tx) { + stats->tx_bytes += ring->stats.tx_bytes; + stats->tx_packets += ring->stats.tx_pkts; + stats->tx_dropped += ring->stats.sw_err_cnt; + stats->tx_dropped += ring->stats.tx_vlan_err; + stats->tx_dropped += ring->stats.tx_l4_proto_err; + stats->tx_dropped += ring->stats.tx_l2l3l4_err; + stats->tx_dropped += ring->stats.tx_tso_err; + stats->tx_dropped += ring->stats.over_max_recursion; + stats->tx_dropped += ring->stats.hw_limitation; + stats->tx_dropped += ring->stats.copy_bits_err; + stats->tx_dropped += ring->stats.skb2sgl_err; + stats->tx_dropped += ring->stats.map_sg_err; + stats->tx_errors += ring->stats.sw_err_cnt; + stats->tx_errors += ring->stats.tx_vlan_err; + stats->tx_errors += ring->stats.tx_l4_proto_err; + stats->tx_errors += ring->stats.tx_l2l3l4_err; + stats->tx_errors += ring->stats.tx_tso_err; + stats->tx_errors += ring->stats.over_max_recursion; + stats->tx_errors += ring->stats.hw_limitation; + stats->tx_errors += ring->stats.copy_bits_err; + stats->tx_errors += ring->stats.skb2sgl_err; + stats->tx_errors += ring->stats.map_sg_err; + } else { + stats->rx_bytes += ring->stats.rx_bytes; + stats->rx_packets += ring->stats.rx_pkts; + stats->rx_dropped += ring->stats.l2_err; + stats->rx_errors += ring->stats.l2_err; + stats->rx_errors += ring->stats.l3l4_csum_err; + stats->rx_crc_errors += ring->stats.l2_err; + stats->multicast += ring->stats.rx_multicast; + stats->rx_length_errors += ring->stats.err_pkt_len; + } + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +} + static void hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct hns3_nic_priv *priv = netdev_priv(netdev); int queue_num = priv->ae_handle->kinfo.num_tqps; struct hnae3_handle *handle = priv->ae_handle; + struct rtnl_link_stats64 ring_total_stats; struct hns3_enet_ring *ring; - u64 rx_length_errors = 0; - u64 rx_crc_errors = 0; - u64 rx_multicast = 0; - unsigned int start; - u64 tx_errors = 0; - u64 rx_errors = 0; unsigned int idx; - u64 tx_bytes = 0; - u64 rx_bytes = 0; - u64 tx_pkts = 0; - u64 rx_pkts = 0; - u64 tx_drop = 0; - u64 rx_drop = 0; if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return; handle->ae_algo->ops->update_stats(handle, &netdev->stats); + memset(&ring_total_stats, 0, sizeof(ring_total_stats)); for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ ring = &priv->ring[idx]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - tx_bytes += ring->stats.tx_bytes; - tx_pkts += ring->stats.tx_pkts; - tx_drop += ring->stats.sw_err_cnt; - tx_drop += ring->stats.tx_vlan_err; - tx_drop += ring->stats.tx_l4_proto_err; - tx_drop += ring->stats.tx_l2l3l4_err; - tx_drop += ring->stats.tx_tso_err; - tx_drop += ring->stats.over_max_recursion; - tx_drop += ring->stats.hw_limitation; - tx_drop += ring->stats.copy_bits_err; - tx_drop += ring->stats.skb2sgl_err; - tx_drop += ring->stats.map_sg_err; - tx_errors += ring->stats.sw_err_cnt; - tx_errors += ring->stats.tx_vlan_err; - tx_errors += ring->stats.tx_l4_proto_err; - tx_errors += ring->stats.tx_l2l3l4_err; - tx_errors += ring->stats.tx_tso_err; - tx_errors += ring->stats.over_max_recursion; - tx_errors += ring->stats.hw_limitation; - tx_errors += ring->stats.copy_bits_err; - tx_errors += ring->stats.skb2sgl_err; - tx_errors += ring->stats.map_sg_err; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + hns3_fetch_stats(&ring_total_stats, ring, 
true); /* fetch the rx stats */ ring = &priv->ring[idx + queue_num]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - rx_bytes += ring->stats.rx_bytes; - rx_pkts += ring->stats.rx_pkts; - rx_drop += ring->stats.l2_err; - rx_errors += ring->stats.l2_err; - rx_errors += ring->stats.l3l4_csum_err; - rx_crc_errors += ring->stats.l2_err; - rx_multicast += ring->stats.rx_multicast; - rx_length_errors += ring->stats.err_pkt_len; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - } - - stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_pkts; - stats->rx_bytes = rx_bytes; - stats->rx_packets = rx_pkts; - - stats->rx_errors = rx_errors; - stats->multicast = rx_multicast; - stats->rx_length_errors = rx_length_errors; - stats->rx_crc_errors = rx_crc_errors; + hns3_fetch_stats(&ring_total_stats, ring, false); + } + + stats->tx_bytes = ring_total_stats.tx_bytes; + stats->tx_packets = ring_total_stats.tx_packets; + stats->rx_bytes = ring_total_stats.rx_bytes; + stats->rx_packets = ring_total_stats.rx_packets; + + stats->rx_errors = ring_total_stats.rx_errors; + stats->multicast = ring_total_stats.multicast; + stats->rx_length_errors = ring_total_stats.rx_length_errors; + stats->rx_crc_errors = ring_total_stats.rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - stats->tx_errors = tx_errors; - stats->rx_dropped = rx_drop; - stats->tx_dropped = tx_drop; + stats->tx_errors = ring_total_stats.tx_errors; + stats->rx_dropped = ring_total_stats.rx_dropped; + stats->tx_dropped = ring_total_stats.tx_dropped; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -2658,18 +2680,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) return ret; } -static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +static int hns3_get_timeout_queue(struct net_device *ndev) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = hns3_get_handle(ndev); - struct hns3_enet_ring *tx_ring; - struct napi_struct *napi; - int timeout_queue = 0; - int hw_head, hw_tail; - int fbd_num, fbd_oft; - int ebd_num, ebd_oft; - int bd_num, bd_err; - int ring_en, tc; int i; /* Find the stopped queue the same way the stack does */ @@ -2678,11 +2690,17 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) unsigned long trans_start; q = netdev_get_tx_queue(ndev, i); - trans_start = q->trans_start; + trans_start = READ_ONCE(q->trans_start); if (netif_xmit_stopped(q) && time_after(jiffies, (trans_start + ndev->watchdog_timeo))) { - timeout_queue = i; +#ifdef CONFIG_BQL + struct dql *dql = &q->dql; + + netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n", + dql->last_obj_cnt, dql->num_queued, + dql->adj_limit, dql->num_completed); +#endif netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", q->state, jiffies_to_msecs(jiffies - trans_start)); @@ -2690,17 +2708,15 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) } } - if (i == ndev->num_tx_queues) { - netdev_info(ndev, - "no netdev TX timeout queue found, timeout count: %llu\n", - priv->tx_timeout_count); - return false; - } - - priv->tx_timeout_count++; + return i; +} - tx_ring = &priv->ring[timeout_queue]; - napi = &tx_ring->tqp_vector->napi; +static void hns3_dump_queue_stats(struct net_device *ndev, + struct hns3_enet_ring *tx_ring, + int timeout_queue) +{ + struct napi_struct *napi = &tx_ring->tqp_vector->napi; + struct 
hns3_nic_priv *priv = netdev_priv(ndev); netdev_info(ndev, "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", @@ -2716,6 +2732,48 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); +} + +static void hns3_dump_queue_reg(struct net_device *ndev, + struct hns3_enet_ring *tx_ring) +{ + netdev_info(ndev, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG), + readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG), + hns3_tqp_read_reg(tx_ring, + HNS3_RING_TX_RING_EBD_OFFSET_REG)); +} + +static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = hns3_get_handle(ndev); + struct hns3_enet_ring *tx_ring; + int timeout_queue; + + timeout_queue = hns3_get_timeout_queue(ndev); + if (timeout_queue >= ndev->num_tx_queues) { + netdev_info(ndev, + "no netdev TX timeout queue found, timeout count: %llu\n", + priv->tx_timeout_count); + return false; + } + + priv->tx_timeout_count++; + + tx_ring = &priv->ring[timeout_queue]; + hns3_dump_queue_stats(ndev, tx_ring, timeout_queue); /* When mac received many pause frames continuous, it's unable to send * packets, which may cause tx timeout @@ -2728,32 +2786,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); } - hw_head = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_HEAD_REG); - hw_tail = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_TAIL_REG); - fbd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_FBDNUM_REG); - fbd_oft = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_OFFSET_REG); - ebd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_EBDNUM_REG); - ebd_oft = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_EBD_OFFSET_REG); - bd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_BD_NUM_REG); - bd_err = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_BD_ERR_REG); - ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); - tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); - - netdev_info(ndev, - "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", - bd_num, hw_head, hw_tail, bd_err, - readl(tx_ring->tqp_vector->mask_addr)); - netdev_info(ndev, - "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", - ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); + hns3_dump_queue_reg(ndev, tx_ring); return true; } @@ -2836,14 +2869,16 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { struct 
hnae3_handle *h = hns3_get_handle(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; if (!h->ae_algo->ops->set_vf_mac) return -EOPNOTSUPP; if (is_multicast_ether_addr(mac)) { + hnae3_format_mac_addr(format_mac_addr, mac); netdev_err(netdev, - "Invalid MAC:%pM specified. Could not set MAC\n", - mac); + "Invalid MAC:%s specified. Could not set MAC\n", + format_mac_addr); return -EINVAL; } @@ -3497,17 +3532,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, for (i = 0; i < cleand_count; i++) { desc_cb = &ring->desc_cb[ring->next_to_use]; if (desc_cb->reuse_flag) { - u64_stats_update_begin(&ring->syncp); - ring->stats.reuse_pg_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, reuse_pg_cnt); hns3_reuse_buffer(ring, ring->next_to_use); } else { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); hns3_rl_err(ring_to_netdev(ring), "alloc rx buffer failed: %d\n", @@ -3519,9 +3550,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); - u64_stats_update_begin(&ring->syncp); - ring->stats.non_reuse_pg++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, non_reuse_pg); } ring_ptr_move_fw(ring, next_to_use); @@ -3536,6 +3565,34 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) return page_count(cb->priv) == cb->pagecnt_bias; } +static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, + int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + u32 frag_offset = desc_cb->page_offset + pull_len; + int size = le16_to_cpu(desc->rx.size); + u32 frag_size = size - pull_len; + void *frag = napi_alloc_frag(frag_size); + + if (unlikely(!frag)) { + hns3_ring_stats_update(ring, frag_alloc_err); + + hns3_rl_err(ring_to_netdev(ring), + "failed to allocate rx frag\n"); + return -ENOMEM; + } + + desc_cb->reuse_flag = 1; + memcpy(frag, desc_cb->buf + frag_offset, frag_size); + skb_add_rx_frag(skb, i, virt_to_page(frag), + offset_in_page(frag), frag_size, frag_size); + + hns3_ring_stats_update(ring, frag_alloc); + return 0; +} + static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) @@ -3545,6 +3602,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, int size = le16_to_cpu(desc->rx.size); u32 truesize = hns3_buf_size(ring); u32 frag_size = size - pull_len; + int ret = 0; bool reused; if (ring->page_pool) { @@ -3579,27 +3637,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, desc_cb->page_offset = 0; desc_cb->reuse_flag = 1; } else if (frag_size <= ring->rx_copybreak) { - void *frag = napi_alloc_frag(frag_size); - - if (unlikely(!frag)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc_err++; - u64_stats_update_end(&ring->syncp); - - hns3_rl_err(ring_to_netdev(ring), - "failed to allocate rx frag\n"); + ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); + if (ret) goto out; - } - - desc_cb->reuse_flag = 1; - memcpy(frag, desc_cb->buf + frag_offset, frag_size); - skb_add_rx_frag(skb, i, virt_to_page(frag), - offset_in_page(frag), frag_size, frag_size); - - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc++; - u64_stats_update_end(&ring->syncp); - return; } out: @@ -3682,9 
+3722,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring, hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) return false; - u64_stats_update_begin(&ring->syncp); - ring->stats.csum_complete++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, csum_complete); skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)csum); @@ -3758,9 +3796,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | BIT(HNS3_RXD_OL3E_B) | BIT(HNS3_RXD_OL4E_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l3l4_csum_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, l3l4_csum_err); return; } @@ -3851,10 +3887,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, skb = ring->skb; if (unlikely(!skb)) { hns3_rl_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -3885,9 +3918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, if (ring->page_pool) skb_mark_for_recycle(skb); - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, seg_pkt_cnt); ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); __skb_put(skb, ring->pull_len); @@ -4015,6 +4046,39 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, skb_set_hash(skb, rss_hash, rss_type); } +static void hns3_handle_rx_ts_info(struct net_device *netdev, + struct hns3_desc *desc, struct sk_buff *skb, + u32 bd_base_info) +{ + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 nsec = le32_to_cpu(desc->ts_nsec); + u32 sec = le32_to_cpu(desc->ts_sec); + + if (h->ae_algo->ops->get_rx_hwts) + h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); + } +} + +static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, struct sk_buff *skb, + u32 l234info) +{ + struct net_device *netdev = ring_to_netdev(ring); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. + */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + } +} + static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) { struct net_device *netdev = ring_to_netdev(ring); @@ -4037,26 +4101,9 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ol_info = le32_to_cpu(desc->rx.ol_info); csum = le16_to_cpu(desc->csum); - if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { - struct hnae3_handle *h = hns3_get_handle(netdev); - u32 nsec = le32_to_cpu(desc->ts_nsec); - u32 sec = le32_to_cpu(desc->ts_sec); - - if (h->ae_algo->ops->get_rx_hwts) - h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); - } - - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. 
- */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; + hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info); - if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_tag); - } + hns3_handle_rx_vlan_tag(ring, desc, skb, l234info); if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | BIT(HNS3_RXD_L2E_B))))) { @@ -4079,9 +4126,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info, ol_info, csum); if (unlikely(ret)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.rx_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, rx_err_cnt); return ret; } @@ -4297,87 +4342,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) return rx_pkt_total; } -static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) +static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node **head, + bool is_tx) { + u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX; + u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX; + struct hnae3_ring_chain_node *cur_chain = *head; struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *cur_chain = head; struct hnae3_ring_chain_node *chain; - struct hns3_enet_ring *tx_ring; - struct hns3_enet_ring *rx_ring; - - tx_ring = tqp_vector->tx_group.ring; - if (tx_ring) { - cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); - - cur_chain->next = NULL; - - while (tx_ring->next) { - tx_ring = tx_ring->next; - - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), - GFP_KERNEL); - if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); - - cur_chain = chain; - } - } + struct hns3_enet_ring *ring; - rx_ring = tqp_vector->rx_group.ring; - if (!tx_ring && rx_ring) { - cur_chain->next = NULL; - cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + ring = is_tx ? 
tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; - rx_ring = rx_ring->next; + if (cur_chain) { + while (cur_chain->next) + cur_chain = cur_chain->next; } - while (rx_ring) { + while (ring) { chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = rx_ring->tqp->tqp_index; + return -ENOMEM; + if (cur_chain) + cur_chain->next = chain; + else + *head = chain; + chain->tqp_index = ring->tqp->tqp_index; hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + bit_value); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, field_value); cur_chain = chain; - rx_ring = rx_ring->next; + ring = ring->next; } return 0; +} + +static struct hnae3_ring_chain_node * +hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = NULL; + struct hnae3_ring_chain_node *chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, true)) + goto err_free_chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, false)) + goto err_free_chain; + + return cur_chain; err_free_chain: - cur_chain = head->next; while (cur_chain) { chain = cur_chain->next; devm_kfree(&pdev->dev, cur_chain); cur_chain = chain; } - head->next = NULL; - return -ENOMEM; + return NULL; } static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, @@ -4386,7 +4414,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, struct pci_dev *pdev = tqp_vector->handle->pdev; struct hnae3_ring_chain_node *chain_tmp, *chain; - chain = head->next; + chain = head; while (chain) { chain_tmp = chain->next; @@ -4501,7 +4529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) } for (i = 0; i < priv->vector_num; i++) { - struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_ring_chain_node *vector_ring_chain; tqp_vector = &priv->tqp_vector[i]; @@ -4511,15 +4539,16 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) tqp_vector->tx_group.total_packets = 0; tqp_vector->handle = h; - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) { + ret = -ENOMEM; goto map_ring_fail; + } ret = h->ae_algo->ops->map_ring_to_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); if (ret) goto map_ring_fail; @@ -4618,7 +4647,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { - struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_ring_chain_node *vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int i; @@ -4633,13 +4662,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) * chain between vector and ring, we should go on to deal with * the remaining options. 
*/ - if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) dev_warn(priv->dev, "failed to get ring chain\n"); h->ae_algo->ops->unmap_ring_from_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); @@ -4934,6 +4964,7 @@ static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) static int hns3_init_mac_addr(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = priv->ae_handle; u8 mac_addr_temp[ETH_ALEN]; int ret = 0; @@ -4944,8 +4975,9 @@ static int hns3_init_mac_addr(struct net_device *netdev) /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(mac_addr_temp)) { eth_hw_addr_random(netdev); - dev_warn(priv->dev, "using random MAC address %pM\n", - netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); + dev_warn(priv->dev, "using random MAC address %s\n", + format_mac_addr); } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { eth_hw_addr_set(netdev, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); @@ -4997,8 +5029,10 @@ static void hns3_client_stop(struct hnae3_handle *handle) static void hns3_info_show(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; - dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); + dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); @@ -5287,9 +5321,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) if (!ring->desc_cb[ring->next_to_use].reuse_flag) { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); /* if alloc new buffer fail, exit directly * and reclear in up flow. 
*/ @@ -5531,8 +5563,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) return 0; } -static int hns3_reset_notify(struct hnae3_handle *handle, - enum hnae3_reset_notify_type type) +int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type) { int ret = 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 1715c98d906d..a05a0c7423ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -10,6 +10,9 @@ #include "hnae3.h" +struct iphdr; +struct ipv6hdr; + enum hns3_nic_state { HNS3_NIC_STATE_TESTING, HNS3_NIC_STATE_RESETTING, @@ -621,6 +624,11 @@ static inline int ring_space(struct hns3_enet_ring *ring) (begin - end)) - 1; } +static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg) +{ + return readl_relaxed(ring->tqp->io_base + reg); +} + static inline u32 hns3_read_reg(void __iomem *base, u32 reg) { return readl(base + reg); @@ -655,6 +663,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hns3_buf_size(_ring) ((_ring)->buf_size) +#define hns3_ring_stats_update(ring, cnt) do { \ + typeof(ring) (tmp) = (ring); \ + u64_stats_update_begin(&(tmp)->syncp); \ + ((tmp)->stats.cnt)++; \ + u64_stats_update_end(&(tmp)->syncp); \ +} while (0) \ + static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) { #if (PAGE_SIZE < 8192) @@ -705,6 +720,8 @@ void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, u32 ql_value); void hns3_request_update_promisc_mode(struct hnae3_handle *handle); +int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type); #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index c8442b86df94..c06c39ece80d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -643,11 +643,13 @@ static u32 hns3_get_link(struct net_device *netdev) } static void hns3_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - int queue_num = h->kinfo.num_tqps; + int rx_queue_index = h->kinfo.num_tqps; if (hns3_nic_resetting(netdev)) { netdev_err(netdev, "dev resetting!"); @@ -658,7 +660,8 @@ static void hns3_get_ringparam(struct net_device *netdev, param->rx_max_pending = HNS3_RING_MAX_PENDING; param->tx_pending = priv->ring[0].desc_num; - param->rx_pending = priv->ring[queue_num].desc_num; + param->rx_pending = priv->ring[rx_queue_index].desc_num; + kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size; } static void hns3_get_pauseparam(struct net_device *netdev, @@ -987,6 +990,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; const struct hns3_reset_type_map *rst_type_map; + enum ethtool_reset_flags rst_flags; u32 i, size; if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) @@ -1006,6 +1010,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) for (i = 0; i < size; i++) { if (rst_type_map[i].rst_flags == *flags) { rst_type = 
rst_type_map[i].rst_type; + rst_flags = rst_type_map[i].rst_flags; break; } } @@ -1021,6 +1026,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags) ops->reset_event(h->pdev, h); + *flags &= ~rst_flags; + return 0; } @@ -1060,14 +1067,23 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv) } static int hns3_check_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param) { +#define RX_BUF_LEN_2K 2048 +#define RX_BUF_LEN_4K 4096 if (hns3_nic_resetting(ndev)) return -EBUSY; if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; + if (kernel_param->rx_buf_len != RX_BUF_LEN_2K && + kernel_param->rx_buf_len != RX_BUF_LEN_4K) { + netdev_err(ndev, "Rx buf len only support 2048 and 4096\n"); + return -EINVAL; + } + if (param->tx_pending > HNS3_RING_MAX_PENDING || param->tx_pending < HNS3_RING_MIN_PENDING || param->rx_pending > HNS3_RING_MAX_PENDING || @@ -1080,8 +1096,26 @@ static int hns3_check_ringparam(struct net_device *ndev, return 0; } +static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int i; + + h->kinfo.rx_buf_len = rx_buf_len; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + h->kinfo.tqp[i]->buf_size = rx_buf_len; + priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len; + } + + return 0; +} + static int hns3_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; @@ -1090,9 +1124,10 @@ static int hns3_set_ringparam(struct net_device *ndev, u32 old_tx_desc_num, new_tx_desc_num; u32 old_rx_desc_num, new_rx_desc_num; u16 queue_num = h->kinfo.num_tqps; + u32 old_rx_buf_len; int ret, i; - ret = hns3_check_ringparam(ndev, param); + ret = hns3_check_ringparam(ndev, param, kernel_param); if (ret) return ret; @@ -1101,8 +1136,10 @@ static int hns3_set_ringparam(struct net_device *ndev, new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); old_tx_desc_num = priv->ring[0].desc_num; old_rx_desc_num = priv->ring[queue_num].desc_num; + old_rx_buf_len = priv->ring[queue_num].buf_size; if (old_tx_desc_num == new_tx_desc_num && - old_rx_desc_num == new_rx_desc_num) + old_rx_desc_num == new_rx_desc_num && + kernel_param->rx_buf_len == old_rx_buf_len) return 0; tmp_rings = hns3_backup_ringparam(priv); @@ -1113,19 +1150,22 @@ static int hns3_set_ringparam(struct net_device *ndev, } netdev_info(ndev, - "Changing Tx/Rx ring depth from %u/%u to %u/%u\n", + "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n", old_tx_desc_num, old_rx_desc_num, - new_tx_desc_num, new_rx_desc_num); + new_tx_desc_num, new_rx_desc_num, + old_rx_buf_len, kernel_param->rx_buf_len); if (if_running) ndev->netdev_ops->ndo_stop(ndev); hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); + hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len); ret = hns3_init_all_ring(priv); if (ret) { - netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n", + netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", ret); + hns3_change_rx_buf_len(ndev, old_rx_buf_len); hns3_change_all_ring_bd_num(priv, old_tx_desc_num, old_rx_desc_num); for (i = 0; i < h->kinfo.num_tqps * 
2; i++) @@ -1695,6 +1735,7 @@ static int hns3_get_tunable(struct net_device *netdev, void *data) { struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; int ret = 0; switch (tuna->id) { @@ -1705,6 +1746,9 @@ static int hns3_get_tunable(struct net_device *netdev, case ETHTOOL_RX_COPYBREAK: *(u32 *)data = priv->rx_copybreak; break; + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: + *(u32 *)data = h->kinfo.tx_spare_buf_size; + break; default: ret = -EOPNOTSUPP; break; @@ -1713,11 +1757,43 @@ static int hns3_get_tunable(struct net_device *netdev, return ret; } +static int hns3_set_tx_spare_buf_size(struct net_device *netdev, + u32 data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret; + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + h->kinfo.tx_spare_buf_size = data; + + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_UP_CLIENT); + if (ret) + hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + + return ret; +} + static int hns3_set_tunable(struct net_device *netdev, const struct ethtool_tunable *tuna, const void *data) { struct hns3_nic_priv *priv = netdev_priv(netdev); + u32 old_tx_spare_buf_size, new_tx_spare_buf_size; struct hnae3_handle *h = priv->ae_handle; int i, ret = 0; @@ -1736,6 +1812,26 @@ static int hns3_set_tunable(struct net_device *netdev, priv->ring[i].rx_copybreak = priv->rx_copybreak; break; + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: + old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; + new_tx_spare_buf_size = *(u32 *)data; + ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); + if (ret) { + int ret1; + + netdev_warn(netdev, + "change tx spare buf size fail, revert to old value\n"); + ret1 = hns3_set_tx_spare_buf_size(netdev, + old_tx_spare_buf_size); + if (ret1) { + netdev_err(netdev, + "revert to old tx spare buf size fail\n"); + return ret1; + } + + return ret; + } + break; default: ret = -EOPNOTSUPP; break; @@ -1751,6 +1847,8 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_COALESCE_MAX_FRAMES | \ ETHTOOL_COALESCE_USE_CQE) +#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN + static int hns3_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) { @@ -1829,6 +1927,7 @@ static int hns3_get_link_ext_state(struct net_device *netdev, static const struct ethtool_ops hns3vf_ethtool_ops = { .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, + .supported_ring_params = HNS3_ETHTOOL_RING, .get_drvinfo = hns3_get_drvinfo, .get_ringparam = hns3_get_ringparam, .set_ringparam = hns3_set_ringparam, @@ -1860,6 +1959,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { static const struct ethtool_ops hns3_ethtool_ops = { .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, + .supported_ring_params = HNS3_ETHTOOL_RING, .self_test = hns3_self_test, .get_drvinfo = hns3_get_drvinfo, .get_link = hns3_get_link, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile deleted file mode 100644 index d1bf5c4c0abb..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0+ -# -# Makefile for the HISILICON network device drivers. 
-# - -ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 -ccflags-y += -I $(srctree)/$(src) - -obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o - -hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c deleted file mode 100644 index c5d5466810bb..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ /dev/null @@ -1,591 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -// Copyright (c) 2016-2017 Hisilicon Limited. - -#include <linux/dma-mapping.h> -#include <linux/slab.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/err.h> -#include <linux/dma-direction.h> -#include "hclge_cmd.h" -#include "hnae3.h" -#include "hclge_main.h" - -#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) - -static int hclge_ring_space(struct hclge_cmq_ring *ring) -{ - int ntu = ring->next_to_use; - int ntc = ring->next_to_clean; - int used = (ntu - ntc + ring->desc_num) % ring->desc_num; - - return ring->desc_num - used - 1; -} - -static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head) -{ - int ntu = ring->next_to_use; - int ntc = ring->next_to_clean; - - if (ntu > ntc) - return head >= ntc && head <= ntu; - - return head >= ntc || head <= ntu; -} - -static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) -{ - int size = ring->desc_num * sizeof(struct hclge_desc); - - ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, - &ring->desc_dma_addr, GFP_KERNEL); - if (!ring->desc) - return -ENOMEM; - - return 0; -} - -static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) -{ - int size = ring->desc_num * sizeof(struct hclge_desc); - - if (ring->desc) { - dma_free_coherent(cmq_ring_to_dev(ring), size, - ring->desc, ring->desc_dma_addr); - ring->desc = NULL; - } -} - -static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) -{ - struct hclge_hw *hw = &hdev->hw; - struct hclge_cmq_ring *ring = - (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; - int ret; - - ring->ring_type = ring_type; - ring->dev = hdev; - - ret = hclge_alloc_cmd_desc(ring); - if (ret) { - dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n", - (ring_type == HCLGE_TYPE_CSQ) ? 
"CSQ" : "CRQ", ret); - return ret; - } - - return 0; -} - -void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) -{ - desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); - if (is_read) - desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); - else - desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); -} - -void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, - enum hclge_opcode_type opcode, bool is_read) -{ - memset((void *)desc, 0, sizeof(struct hclge_desc)); - desc->opcode = cpu_to_le16(opcode); - desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); - - if (is_read) - desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); -} - -static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) -{ - dma_addr_t dma = ring->desc_dma_addr; - struct hclge_dev *hdev = ring->dev; - struct hclge_hw *hw = &hdev->hw; - u32 reg_val; - - if (ring->ring_type == HCLGE_TYPE_CSQ) { - hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, - lower_32_bits(dma)); - hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, - upper_32_bits(dma)); - reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG); - reg_val &= HCLGE_NIC_SW_RST_RDY; - reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S; - hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); - hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); - } else { - hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, - lower_32_bits(dma)); - hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, - upper_32_bits(dma)); - hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, - ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S); - hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); - } -} - -static void hclge_cmd_init_regs(struct hclge_hw *hw) -{ - hclge_cmd_config_regs(&hw->cmq.csq); - hclge_cmd_config_regs(&hw->cmq.crq); -} - -static int hclge_cmd_csq_clean(struct hclge_hw *hw) -{ - struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); - struct hclge_cmq_ring *csq = &hw->cmq.csq; - u32 head; - int clean; - - head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); - rmb(); /* Make sure head is ready before touch any data */ - - if (!is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, - csq->next_to_use, csq->next_to_clean); - dev_warn(&hdev->pdev->dev, - "Disabling any further commands to IMP firmware\n"); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - dev_warn(&hdev->pdev->dev, - "IMP firmware watchdog reset soon expected!\n"); - return -EIO; - } - - clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; - csq->next_to_clean = head; - return clean; -} - -static int hclge_cmd_csq_done(struct hclge_hw *hw) -{ - u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); - return head == hw->cmq.csq.next_to_use; -} - -static bool hclge_is_special_opcode(u16 opcode) -{ - /* these commands have several descriptors, - * and use the first one to save opcode and return value - */ - static const u16 spec_opcode[] = { - HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, - HCLGE_OPC_STATS_MAC, - HCLGE_OPC_STATS_MAC_ALL, - HCLGE_OPC_QUERY_32_BIT_REG, - HCLGE_OPC_QUERY_64_BIT_REG, - HCLGE_QUERY_CLEAR_MPF_RAS_INT, - HCLGE_QUERY_CLEAR_PF_RAS_INT, - HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, - HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, - HCLGE_QUERY_ALL_ERR_INFO - }; - int i; - - for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { - if (spec_opcode[i] == opcode) - return true; - } - - return false; -} - -struct errcode { - u32 imp_errcode; - int 
common_errno; -}; - -static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc, - int num) -{ - struct hclge_desc *desc_to_use; - int handle = 0; - - while (handle < num) { - desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; - *desc_to_use = desc[handle]; - (hw->cmq.csq.next_to_use)++; - if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num) - hw->cmq.csq.next_to_use = 0; - handle++; - } -} - -static int hclge_cmd_convert_err_code(u16 desc_ret) -{ - struct errcode hclge_cmd_errcode[] = { - {HCLGE_CMD_EXEC_SUCCESS, 0}, - {HCLGE_CMD_NO_AUTH, -EPERM}, - {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP}, - {HCLGE_CMD_QUEUE_FULL, -EXFULL}, - {HCLGE_CMD_NEXT_ERR, -ENOSR}, - {HCLGE_CMD_UNEXE_ERR, -ENOTBLK}, - {HCLGE_CMD_PARA_ERR, -EINVAL}, - {HCLGE_CMD_RESULT_ERR, -ERANGE}, - {HCLGE_CMD_TIMEOUT, -ETIME}, - {HCLGE_CMD_HILINK_ERR, -ENOLINK}, - {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO}, - {HCLGE_CMD_INVALID, -EBADR}, - }; - u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode); - u32 i; - - for (i = 0; i < errcode_count; i++) - if (hclge_cmd_errcode[i].imp_errcode == desc_ret) - return hclge_cmd_errcode[i].common_errno; - - return -EIO; -} - -static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc, - int num, int ntc) -{ - u16 opcode, desc_ret; - int handle; - - opcode = le16_to_cpu(desc[0].opcode); - for (handle = 0; handle < num; handle++) { - desc[handle] = hw->cmq.csq.desc[ntc]; - ntc++; - if (ntc >= hw->cmq.csq.desc_num) - ntc = 0; - } - if (likely(!hclge_is_special_opcode(opcode))) - desc_ret = le16_to_cpu(desc[num - 1].retval); - else - desc_ret = le16_to_cpu(desc[0].retval); - - hw->cmq.last_status = desc_ret; - - return hclge_cmd_convert_err_code(desc_ret); -} - -static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc, - int num, int ntc) -{ - struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); - bool is_completed = false; - u32 timeout = 0; - int handle, ret; - - /** - * If the command is sync, wait for the firmware to write back, - * if multi descriptors to be sent, use the first one to check - */ - if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) { - do { - if (hclge_cmd_csq_done(hw)) { - is_completed = true; - break; - } - udelay(1); - timeout++; - } while (timeout < hw->cmq.tx_timeout); - } - - if (!is_completed) - ret = -EBADE; - else - ret = hclge_cmd_check_retval(hw, desc, num, ntc); - - /* Clean the command send queue */ - handle = hclge_cmd_csq_clean(hw); - if (handle < 0) - ret = handle; - else if (handle != num) - dev_warn(&hdev->pdev->dev, - "cleaned %d, need to clean %d\n", handle, num); - return ret; -} - -/** - * hclge_cmd_send - send command to command queue - * @hw: pointer to the hw struct - * @desc: prefilled descriptor for describing the command - * @num : the number of descriptors to be sent - * - * This is the main send command for command queue, it - * sends the queue, cleans the queue, etc - **/ -int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) -{ - struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); - struct hclge_cmq_ring *csq = &hw->cmq.csq; - int ret; - int ntc; - - spin_lock_bh(&hw->cmq.csq.lock); - - if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { - spin_unlock_bh(&hw->cmq.csq.lock); - return -EBUSY; - } - - if (num > hclge_ring_space(&hw->cmq.csq)) { - /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, - * need update the SW HEAD pointer csq->next_to_clean - */ - csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); - 
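/* A standalone sketch of the circular-queue arithmetic behind
 * hclge_ring_space() and hclge_cmd_csq_clean() above: next_to_use (ntu)
 * chases next_to_clean (ntc) modulo desc_num, and one slot is deliberately
 * kept unused so a full ring and an empty ring stay distinguishable.
 * Compilable on its own; only the standalone function name is invented. */
#include <assert.h>

static int ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;
}

int main(void)
{
	assert(ring_space(0, 0, 1024) == 1023);    /* empty ring */
	assert(ring_space(1023, 0, 1024) == 0);    /* full: one slot reserved */
	assert(ring_space(2, 1020, 1024) == 1017); /* producer has wrapped */
	return 0;
}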
spin_unlock_bh(&hw->cmq.csq.lock); - return -EBUSY; - } - - /** - * Record the location of desc in the ring for this time - * which will be use for hardware to write back - */ - ntc = hw->cmq.csq.next_to_use; - - hclge_cmd_copy_desc(hw, desc, num); - - /* Write to hardware */ - hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use); - - ret = hclge_cmd_check_result(hw, desc, num, ntc); - - spin_unlock_bh(&hw->cmq.csq.lock); - - return ret; -} - -static void hclge_set_default_capability(struct hclge_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - - set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps); - if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); - } -} - -static const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = { - {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, - {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B}, - {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, - {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, - {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, - {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, - {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B}, - {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B}, - {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B}, - {HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B}, - {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B}, - {HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, - {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, - {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, -}; - -static void hclge_parse_capability(struct hclge_dev *hdev, - struct hclge_query_version_cmd *cmd) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps, i; - - caps = __le32_to_cpu(cmd->caps[0]); - for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++) - if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit)) - set_bit(hclge_cmd_caps_bit_map0[i].local_bit, - ae_dev->caps); -} - -static __le32 hclge_build_api_caps(void) -{ - u32 api_caps = 0; - - hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1); - - return cpu_to_le32(api_caps); -} - -static enum hclge_cmd_status -hclge_cmd_query_version_and_capability(struct hclge_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - struct hclge_query_version_cmd *resp; - struct hclge_desc desc; - int ret; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); - resp = (struct hclge_query_version_cmd *)desc.data; - resp->api_caps = hclge_build_api_caps(); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - return ret; - - hdev->fw_version = le32_to_cpu(resp->firmware); - - ae_dev->dev_version = le32_to_cpu(resp->hardware) << - HNAE3_PCI_REVISION_BIT_SIZE; - ae_dev->dev_version |= hdev->pdev->revision; - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) - hclge_set_default_capability(hdev); - - hclge_parse_capability(hdev, resp); - - return ret; -} - -int hclge_cmd_queue_init(struct hclge_dev *hdev) -{ - int ret; - - /* Setup the lock for command queue */ - spin_lock_init(&hdev->hw.cmq.csq.lock); - spin_lock_init(&hdev->hw.cmq.crq.lock); - - /* Setup the queue entries for use cmd queue */ - hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; - hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; - - /* Setup Tx write back timeout */ - 
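/* A compact sketch of the bounded busy-wait that hclge_cmd_check_result()
 * above performs while waiting for firmware write-back: poll a completion
 * condition once per tick and give up after tx_timeout ticks (the constant
 * configured just below as HCLGE_CMDQ_TX_TIMEOUT). done() and tick() are
 * stand-ins for hclge_cmd_csq_done() and udelay(1); the helper itself is
 * illustrative, not driver code. */
#include <stdbool.h>
#include <stdint.h>

static bool poll_until(bool (*done)(void), void (*tick)(void),
		       uint32_t max_ticks)
{
	uint32_t elapsed = 0;

	while (elapsed < max_ticks) {
		if (done())
			return true; /* firmware wrote back in time */
		tick();
		elapsed++;
	}
	return false; /* the caller maps a miss to -EBADE */
}

static bool fw_done(void) { return true; } /* dummy completion for the demo */
static void spin_tick(void) { }

int main(void)
{
	return poll_until(fw_done, spin_tick, 30000) ? 0 : 1;
}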
hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT; - - /* Setup queue rings */ - ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ); - if (ret) { - dev_err(&hdev->pdev->dev, - "CSQ ring setup error %d\n", ret); - return ret; - } - - ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ); - if (ret) { - dev_err(&hdev->pdev->dev, - "CRQ ring setup error %d\n", ret); - goto err_csq; - } - - return 0; -err_csq: - hclge_free_cmd_desc(&hdev->hw.cmq.csq); - return ret; -} - -static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en) -{ - struct hclge_firmware_compat_cmd *req; - struct hclge_desc desc; - u32 compat = 0; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false); - - if (en) { - req = (struct hclge_firmware_compat_cmd *)desc.data; - - hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); - hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); - if (hnae3_dev_phy_imp_supported(hdev)) - hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1); - hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1); - hnae3_set_bit(compat, HCLGE_SYNC_RX_RING_HEAD_EN_B, 1); - - req->compat = cpu_to_le32(compat); - } - - return hclge_cmd_send(&hdev->hw, &desc, 1); -} - -int hclge_cmd_init(struct hclge_dev *hdev) -{ - int ret; - - spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock(&hdev->hw.cmq.crq.lock); - - hdev->hw.cmq.csq.next_to_clean = 0; - hdev->hw.cmq.csq.next_to_use = 0; - hdev->hw.cmq.crq.next_to_clean = 0; - hdev->hw.cmq.crq.next_to_use = 0; - - hclge_cmd_init_regs(&hdev->hw); - - spin_unlock(&hdev->hw.cmq.crq.lock); - spin_unlock_bh(&hdev->hw.cmq.csq.lock); - - clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - - /* Check if there is new reset pending, because the higher level - * reset may happen when lower level reset is being processed. - */ - if ((hclge_is_reset_pending(hdev))) { - dev_err(&hdev->pdev->dev, - "failed to init cmd since reset %#lx pending\n", - hdev->reset_pending); - ret = -EBUSY; - goto err_cmd_init; - } - - /* get version and device capabilities */ - ret = hclge_cmd_query_version_and_capability(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to query version and capabilities, ret = %d\n", - ret); - goto err_cmd_init; - } - - dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, - HNAE3_FW_VERSION_BYTE3_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, - HNAE3_FW_VERSION_BYTE2_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, - HNAE3_FW_VERSION_BYTE1_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, - HNAE3_FW_VERSION_BYTE0_SHIFT)); - - /* ask the firmware to enable some features, driver can work without - * it. 
- */ - ret = hclge_firmware_compat_config(hdev, true); - if (ret) - dev_warn(&hdev->pdev->dev, - "Firmware compatible features not enabled(%d).\n", - ret); - - return 0; - -err_cmd_init: - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - - return ret; -} - -static void hclge_cmd_uninit_regs(struct hclge_hw *hw) -{ - hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); - hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); -} - -void hclge_cmd_uninit(struct hclge_dev *hdev) -{ - hclge_firmware_compat_config(hdev, false); - - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - /* wait to ensure that the firmware completes the possible left - * over commands. - */ - msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME); - spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock(&hdev->hw.cmq.crq.lock); - hclge_cmd_uninit_regs(&hdev->hw); - spin_unlock(&hdev->hw.cmq.crq.lock); - spin_unlock_bh(&hdev->hw.cmq.csq.lock); - - hclge_free_cmd_desc(&hdev->hw.cmq.csq); - hclge_free_cmd_desc(&hdev->hw.cmq.crq); -} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index d24e59028798..f9d89511eb32 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -7,323 +7,21 @@ #include <linux/io.h> #include <linux/etherdevice.h> #include "hnae3.h" - -#define HCLGE_CMDQ_TX_TIMEOUT 30000 -#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200 -#define HCLGE_DESC_DATA_LEN 6 +#include "hclge_comm_cmd.h" struct hclge_dev; -struct hclge_desc { - __le16 opcode; #define HCLGE_CMDQ_RX_INVLD_B 0 #define HCLGE_CMDQ_RX_OUTVLD_B 1 - __le16 flag; - __le16 retval; - __le16 rsv; - __le32 data[HCLGE_DESC_DATA_LEN]; -}; - -struct hclge_cmq_ring { - dma_addr_t desc_dma_addr; - struct hclge_desc *desc; - struct hclge_dev *dev; - u32 head; - u32 tail; - - u16 buf_size; - u16 desc_num; - int next_to_use; - int next_to_clean; - u8 ring_type; /* cmq ring type */ - spinlock_t lock; /* Command queue lock */ -}; - -enum hclge_cmd_return_status { - HCLGE_CMD_EXEC_SUCCESS = 0, - HCLGE_CMD_NO_AUTH = 1, - HCLGE_CMD_NOT_SUPPORTED = 2, - HCLGE_CMD_QUEUE_FULL = 3, - HCLGE_CMD_NEXT_ERR = 4, - HCLGE_CMD_UNEXE_ERR = 5, - HCLGE_CMD_PARA_ERR = 6, - HCLGE_CMD_RESULT_ERR = 7, - HCLGE_CMD_TIMEOUT = 8, - HCLGE_CMD_HILINK_ERR = 9, - HCLGE_CMD_QUEUE_ILLEGAL = 10, - HCLGE_CMD_INVALID = 11, -}; - -enum hclge_cmd_status { - HCLGE_STATUS_SUCCESS = 0, - HCLGE_ERR_CSQ_FULL = -1, - HCLGE_ERR_CSQ_TIMEOUT = -2, - HCLGE_ERR_CSQ_ERROR = -3, -}; - struct hclge_misc_vector { u8 __iomem *addr; int vector_irq; char name[HNAE3_INT_NAME_LEN]; }; -struct hclge_cmq { - struct hclge_cmq_ring csq; - struct hclge_cmq_ring crq; - u16 tx_timeout; - enum hclge_cmd_status last_status; -}; - -#define HCLGE_CMD_FLAG_IN BIT(0) -#define HCLGE_CMD_FLAG_OUT BIT(1) -#define HCLGE_CMD_FLAG_NEXT BIT(2) -#define HCLGE_CMD_FLAG_WR BIT(3) -#define HCLGE_CMD_FLAG_NO_INTR BIT(4) -#define HCLGE_CMD_FLAG_ERR_INTR BIT(5) - -enum hclge_opcode_type { - /* Generic commands */ - HCLGE_OPC_QUERY_FW_VER = 0x0001, - HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, - HCLGE_OPC_GBL_RST_STATUS = 0x0021, - 
HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022, - HCLGE_OPC_QUERY_PF_RSRC = 0x0023, - HCLGE_OPC_QUERY_VF_RSRC = 0x0024, - HCLGE_OPC_GET_CFG_PARAM = 0x0025, - HCLGE_OPC_PF_RST_DONE = 0x0026, - HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027, - - HCLGE_OPC_STATS_64_BIT = 0x0030, - HCLGE_OPC_STATS_32_BIT = 0x0031, - HCLGE_OPC_STATS_MAC = 0x0032, - HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033, - HCLGE_OPC_STATS_MAC_ALL = 0x0034, - - HCLGE_OPC_QUERY_REG_NUM = 0x0040, - HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, - HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, - HCLGE_OPC_DFX_BD_NUM = 0x0043, - HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044, - HCLGE_OPC_DFX_SSU_REG_0 = 0x0045, - HCLGE_OPC_DFX_SSU_REG_1 = 0x0046, - HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047, - HCLGE_OPC_DFX_RPU_REG_0 = 0x0048, - HCLGE_OPC_DFX_RPU_REG_1 = 0x0049, - HCLGE_OPC_DFX_NCSI_REG = 0x004A, - HCLGE_OPC_DFX_RTC_REG = 0x004B, - HCLGE_OPC_DFX_PPP_REG = 0x004C, - HCLGE_OPC_DFX_RCB_REG = 0x004D, - HCLGE_OPC_DFX_TQP_REG = 0x004E, - HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, - - HCLGE_OPC_QUERY_DEV_SPECS = 0x0050, - - /* MAC command */ - HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, - HCLGE_OPC_CONFIG_AN_MODE = 0x0304, - HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, - HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, - HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, - HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, - HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, - HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, - HCLGE_OPC_COMMON_LOOPBACK = 0x0315, - HCLGE_OPC_CONFIG_FEC_MODE = 0x031A, - - /* PTP commands */ - HCLGE_OPC_PTP_INT_EN = 0x0501, - HCLGE_OPC_PTP_MODE_CFG = 0x0507, - - /* PFC/Pause commands */ - HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, - HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702, - HCLGE_OPC_CFG_MAC_PARA = 0x0703, - HCLGE_OPC_CFG_PFC_PARA = 0x0704, - HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705, - HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706, - HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707, - HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708, - HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709, - HCLGE_OPC_QOS_MAP = 0x070A, - - /* ETS/scheduler commands */ - HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804, - HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805, - HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806, - HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807, - HCLGE_OPC_TM_PORT_WEIGHT = 0x0808, - HCLGE_OPC_TM_PG_WEIGHT = 0x0809, - HCLGE_OPC_TM_QS_WEIGHT = 0x080A, - HCLGE_OPC_TM_PRI_WEIGHT = 0x080B, - HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C, - HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D, - HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E, - HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F, - HCLGE_OPC_TM_PORT_SHAPPING = 0x0810, - HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812, - HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, - HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, - HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, - HCLGE_OPC_TM_NODES = 0x0816, - HCLGE_OPC_ETS_TC_WEIGHT = 0x0843, - HCLGE_OPC_QSET_DFX_STS = 0x0844, - HCLGE_OPC_PRI_DFX_STS = 0x0845, - HCLGE_OPC_PG_DFX_STS = 0x0846, - HCLGE_OPC_PORT_DFX_STS = 0x0847, - HCLGE_OPC_SCH_NQ_CNT = 0x0848, - HCLGE_OPC_SCH_RQ_CNT = 0x0849, - HCLGE_OPC_TM_INTERNAL_STS = 0x0850, - HCLGE_OPC_TM_INTERNAL_CNT = 0x0851, - HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852, - - /* Packet buffer allocate commands */ - HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, - HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, - HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903, - HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904, - HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905, - HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906, - - /* TQP management command */ - HCLGE_OPC_SET_TQP_MAP = 0x0A01, - - /* TQP commands */ - HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, - HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, - HCLGE_OPC_QUERY_TX_STATS = 0x0B03, - HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04, - 
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, - HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, - HCLGE_OPC_QUERY_RX_STATS = 0x0B13, - HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16, - HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17, - HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, - HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, - - /* PPU commands */ - HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A, - - /* TSO command */ - HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, - HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10, - - /* RSS commands */ - HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, - HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07, - HCLGE_OPC_RSS_TC_MODE = 0x0D08, - HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02, - - /* Promisuous mode command */ - HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, - - /* Vlan offload commands */ - HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01, - HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02, - - /* Interrupts commands */ - HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, - HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, - - /* MAC commands */ - HCLGE_OPC_MAC_VLAN_ADD = 0x1000, - HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, - HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, - HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, - HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004, - HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, - HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, - - /* MAC VLAN commands */ - HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033, - - /* VLAN commands */ - HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, - HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, - HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, - HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103, - - /* Flow Director commands */ - HCLGE_OPC_FD_MODE_CTRL = 0x1200, - HCLGE_OPC_FD_GET_ALLOCATION = 0x1201, - HCLGE_OPC_FD_KEY_CONFIG = 0x1202, - HCLGE_OPC_FD_TCAM_OP = 0x1203, - HCLGE_OPC_FD_AD_OP = 0x1204, - HCLGE_OPC_FD_CNT_OP = 0x1205, - HCLGE_OPC_FD_USER_DEF_OP = 0x1207, - - /* MDIO command */ - HCLGE_OPC_MDIO_CONFIG = 0x1900, - - /* QCN commands */ - HCLGE_OPC_QCN_MOD_CFG = 0x1A01, - HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, - HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03, - HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, - HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, - HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, - HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, - HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, - - /* Mailbox command */ - HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, - - /* Led command */ - HCLGE_OPC_LED_STATUS_CFG = 0xB000, - - /* clear hardware resource command */ - HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, - - /* NCL config command */ - HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, - - /* IMP stats command */ - HCLGE_OPC_IMP_STATS_BD = 0x7012, - HCLGE_OPC_IMP_STATS_INFO = 0x7013, - HCLGE_OPC_IMP_COMPAT_CFG = 0x701A, - - /* SFP command */ - HCLGE_OPC_GET_SFP_EEPROM = 0x7100, - HCLGE_OPC_GET_SFP_EXIST = 0x7101, - HCLGE_OPC_GET_SFP_INFO = 0x7104, - - /* Error INT commands */ - HCLGE_MAC_COMMON_INT_EN = 0x030E, - HCLGE_TM_SCH_ECC_INT_EN = 0x0829, - HCLGE_SSU_ECC_INT_CMD = 0x0989, - HCLGE_SSU_COMMON_INT_CMD = 0x098C, - HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40, - HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41, - HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42, - HCLGE_COMMON_ECC_INT_CFG = 0x1505, - HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510, - HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511, - HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512, - HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513, - HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514, - HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515, - HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516, - HCLGE_QUERY_ALL_ERR_INFO = 0x1517, - HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580, - HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581, - HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584, - HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585, - HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586, - 
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, - HCLGE_IGU_COMMON_INT_EN = 0x1806, - HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, - HCLGE_PPP_CMD0_INT_CMD = 0x2100, - HCLGE_PPP_CMD1_INT_CMD = 0x2101, - HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, - HCLGE_NCSI_INT_EN = 0x2401, - - /* PHY command */ - HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, - HCLGE_OPC_PHY_REG = 0x7026, - - /* Query link diagnosis info command */ - HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, -}; +#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \ + hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read) #define HCLGE_TQP_REG_OFFSET 0x80000 #define HCLGE_TQP_REG_SIZE 0x200 @@ -391,38 +89,6 @@ struct hclge_rx_priv_buff_cmd { u8 rsv[6]; }; -enum HCLGE_CAP_BITS { - HCLGE_CAP_UDP_GSO_B, - HCLGE_CAP_QB_B, - HCLGE_CAP_FD_FORWARD_TC_B, - HCLGE_CAP_PTP_B, - HCLGE_CAP_INT_QL_B, - HCLGE_CAP_HW_TX_CSUM_B, - HCLGE_CAP_TX_PUSH_B, - HCLGE_CAP_PHY_IMP_B, - HCLGE_CAP_TQP_TXRX_INDEP_B, - HCLGE_CAP_HW_PAD_B, - HCLGE_CAP_STASH_B, - HCLGE_CAP_UDP_TUNNEL_CSUM_B, - HCLGE_CAP_RAS_IMP_B = 12, - HCLGE_CAP_FEC_B = 13, - HCLGE_CAP_PAUSE_B = 14, - HCLGE_CAP_RXD_ADV_LAYOUT_B = 15, - HCLGE_CAP_PORT_VLAN_BYPASS_B = 17, -}; - -enum HCLGE_API_CAP_BITS { - HCLGE_API_CAP_FLEX_RSS_TBL_B, -}; - -#define HCLGE_QUERY_CAP_LENGTH 3 -struct hclge_query_version_cmd { - __le32 firmware; - __le32 hardware; - __le32 api_caps; - __le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */ -}; - #define HCLGE_RX_PRIV_EN_B 15 #define HCLGE_TC_NUM_ONE_DESC 4 struct hclge_priv_wl { @@ -571,38 +237,10 @@ struct hclge_vf_num_cmd { }; #define HCLGE_RSS_DEFAULT_OUTPORT_B 4 -#define HCLGE_RSS_HASH_KEY_OFFSET_B 4 -#define HCLGE_RSS_HASH_KEY_NUM 16 -struct hclge_rss_config_cmd { - u8 hash_config; - u8 rsv[7]; - u8 hash_key[HCLGE_RSS_HASH_KEY_NUM]; -}; - -struct hclge_rss_input_tuple_cmd { - u8 ipv4_tcp_en; - u8 ipv4_udp_en; - u8 ipv4_sctp_en; - u8 ipv4_fragment_en; - u8 ipv6_tcp_en; - u8 ipv6_udp_en; - u8 ipv6_sctp_en; - u8 ipv6_fragment_en; - u8 rsv[16]; -}; -#define HCLGE_RSS_CFG_TBL_SIZE 16 #define HCLGE_RSS_CFG_TBL_SIZE_H 4 -#define HCLGE_RSS_CFG_TBL_BW_H 2U #define HCLGE_RSS_CFG_TBL_BW_L 8U -struct hclge_rss_indirection_table_cmd { - __le16 start_table_index; - __le16 rss_set_bitmap; - u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H]; - u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE]; -}; - #define HCLGE_RSS_TC_OFFSET_S 0 #define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0) #define HCLGE_RSS_TC_SIZE_MSB_B 11 @@ -610,10 +248,6 @@ struct hclge_rss_indirection_table_cmd { #define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) #define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3 #define HCLGE_RSS_TC_VALID_B 15 -struct hclge_rss_tc_mode_cmd { - __le16 rss_tc_mode[HCLGE_MAX_TC_NUM]; - u8 rsv[8]; -}; #define HCLGE_LINK_STATUS_UP_B 0 #define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B) @@ -1015,16 +649,6 @@ struct hclge_common_lb_cmd { #define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ #define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */ -#define HCLGE_TYPE_CRQ 0 -#define HCLGE_TYPE_CSQ 1 - -/* this bit indicates that the driver is ready for hardware reset */ -#define HCLGE_NIC_SW_RST_RDY_B 16 -#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B) - -#define HCLGE_NIC_CMQ_DESC_NUM 1024 -#define HCLGE_NIC_CMQ_DESC_NUM_S 3 - #define HCLGE_LED_LOCATE_STATE_S 0 #define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0) @@ -1147,16 +771,6 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd { u8 rsv[4]; }; -#define HCLGE_LINK_EVENT_REPORT_EN_B 0 -#define HCLGE_NCSI_ERROR_REPORT_EN_B 1 -#define HCLGE_PHY_IMP_EN_B 2 -#define HCLGE_MAC_STATS_EXT_EN_B 3 -#define 
HCLGE_SYNC_RX_RING_HEAD_EN_B 4 -struct hclge_firmware_compat_cmd { - __le32 compat; - u8 rsv[20]; -}; - #define HCLGE_SFP_INFO_CMD_NUM 6 #define HCLGE_SFP_INFO_BD0_LEN 20 #define HCLGE_SFP_INFO_BDX_LEN 24 @@ -1239,44 +853,10 @@ struct hclge_phy_reg_cmd { u8 rsv1[18]; }; -/* capabilities bits map between imp firmware and local driver */ -struct hclge_caps_bit_map { - u16 imp_bit; - u16 local_bit; -}; - -int hclge_cmd_init(struct hclge_dev *hdev); -static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) -{ - writel(value, base + reg); -} - -#define hclge_write_dev(a, reg, value) \ - hclge_write_reg((a)->io_base, reg, value) -#define hclge_read_dev(a, reg) \ - hclge_read_reg((a)->io_base, reg) - -static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg) -{ - u8 __iomem *reg_addr = READ_ONCE(base); - - return readl(reg_addr + reg); -} - -#define HCLGE_SEND_SYNC(flag) \ - ((flag) & HCLGE_CMD_FLAG_NO_INTR) - struct hclge_hw; int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); -void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, - enum hclge_opcode_type opcode, bool is_read); -void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read); - -enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, - struct hclge_desc *desc); -enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, - struct hclge_desc *desc); - -void hclge_cmd_uninit(struct hclge_dev *hdev); -int hclge_cmd_queue_init(struct hclge_dev *hdev); +enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, + struct hclge_desc *desc); +enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, + struct hclge_desc *desc); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 375ebf105a9a..69b8673436ca 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -203,7 +203,7 @@ static int hclge_map_update(struct hclge_dev *hdev) if (ret) return ret; - hclge_rss_indir_init_cfg(hdev); + hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg); return hclge_rss_init_hw(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 4e0a8c2f7c05..9b870e79c290 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -77,6 +77,10 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { .cmd = HCLGE_OPC_DFX_TQP_REG } }, }; +/* make sure: len(name) + interval >= maxlen(item data) + 2, + * for example, name = "pkt_num"(len: 7), the prototype of item data is u32, + * and print as "%u"(maxlen: 10), so the interval should be at least 5. 
+ */ static void hclge_dbg_fill_content(char *content, u16 len, const struct hclge_dbg_item *items, const char **result, u16 size) @@ -99,7 +103,7 @@ static void hclge_dbg_fill_content(char *content, u16 len, static char *hclge_dbg_get_func_id_str(char *buf, u8 id) { if (id) - sprintf(buf, "vf%u", id - 1); + sprintf(buf, "vf%u", id - 1U); else sprintf(buf, "pf"); @@ -146,7 +150,7 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev, desc->data[0] = cpu_to_le32(index); for (i = 1; i < bd_num; i++) { - desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); desc++; hclge_cmd_setup_basic_desc(desc, cmd, true); } @@ -258,12 +262,29 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev, return 0; } +static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = { + {HCLGE_MAC_TX_EN_B, "mac_trans_en"}, + {HCLGE_MAC_RX_EN_B, "mac_rcv_en"}, + {HCLGE_MAC_PAD_TX_B, "pad_trans_en"}, + {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"}, + {HCLGE_MAC_1588_TX_B, "1588_trans_en"}, + {HCLGE_MAC_1588_RX_B, "1588_rcv_en"}, + {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"}, + {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"}, + {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"}, + {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"}, + {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"}, + {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"}, + {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"}, + {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"} +}; + static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, int len, int *pos) { struct hclge_config_mac_mode_cmd *req; struct hclge_desc desc; - u32 loop_en; + u32 loop_en, i, offset; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); @@ -278,39 +299,12 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, req = (struct hclge_config_mac_mode_cmd *)desc.data; loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B)); - *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B)); - *pos += scnprintf(buf + *pos, len - *pos, - "mac_rx_oversize_truncate_en: %#x\n", - hnae3_get_bit(loop_en, - HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B)); - *pos += scnprintf(buf + *pos, len - *pos, - "mac_tx_under_min_err_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B)); - *pos += 
scnprintf(buf + *pos, len - *pos, - "mac_tx_oversize_truncate_en: %#x\n", - hnae3_get_bit(loop_en, - HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B)); + for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) { + offset = hclge_dbg_mac_en_status[i].offset; + *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", + hclge_dbg_mac_en_status[i].message, + hnae3_get_bit(loop_en, offset)); + } return 0; } @@ -788,7 +782,6 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) data_str = kcalloc(ARRAY_SIZE(tm_pg_items), HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL); - if (!data_str) return -ENOMEM; @@ -1273,7 +1266,7 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf, int i, ret; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true); ret = hclge_cmd_send(&hdev->hw, desc, 2); if (ret) { @@ -1309,7 +1302,7 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev, int i, ret; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true); ret = hclge_cmd_send(&hdev->hw, desc, 2); if (ret) { @@ -1454,9 +1447,9 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, u32 *req; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); - desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; @@ -1614,8 +1607,19 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len) return 0; } +static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = { + {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"}, + {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"}, + {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"}, + {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"}, + {HCLGE_GLOBAL_RESET_REG, "hardware reset status"}, + {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"}, + {HCLGE_FUN_RST_ING, "function reset status"} +}; + int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) { + u32 i, offset; int pos = 0; pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n", @@ -1634,22 +1638,14 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) hdev->rst_stats.reset_cnt); pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt); - pos += scnprintf(buf + pos, len - pos, - "vector0 interrupt enable status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); - pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); - pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); - pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n", - hclge_read_dev(&hdev->hw, - HCLGE_RAS_PF_OTHER_INT_STS_REG)); - pos += scnprintf(buf + 
pos, len - pos, "hardware reset status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); - pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); - pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) { + offset = hclge_dbg_rst_info[i].offset; + pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n", + hclge_dbg_rst_info[i].message, + hclge_read_dev(&hdev->hw, offset)); + } + pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n", hdev->state); @@ -1771,7 +1767,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, - char *buf, int *len, int *pos) + char *buf, int len, int *pos) { #define HCLGE_CMD_DATA_NUM 6 @@ -1783,7 +1779,7 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index, if (i == 0 && j == 0) continue; - *pos += scnprintf(buf + *pos, *len - *pos, + *pos += scnprintf(buf + *pos, len - *pos, "0x%04x | 0x%08x\n", offset, le32_to_cpu(desc[i].data[j])); @@ -1821,7 +1817,7 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len) if (ret) return ret; - hclge_ncl_config_data_print(desc, &index, buf, &len, &pos); + hclge_ncl_config_data_print(desc, &index, buf, len, &pos); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index c526591a7240..724052928b88 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -94,6 +94,11 @@ struct hclge_dbg_func { char *buf, int len); }; +struct hclge_dbg_status_dfx_info { + u32 offset; + char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; +}; + static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { {false, "Reserved"}, {true, "BP_CPU_STATE"}, @@ -321,10 +326,10 @@ static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { {true, "IGU_RX_OUT_UDP0_PKT"}, {true, "IGU_RX_IN_UDP0_PKT"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, + {true, "IGU_MC_CAR_DROP_PKT_L"}, + {true, "IGU_MC_CAR_DROP_PKT_H"}, + {true, "IGU_BC_CAR_DROP_PKT_L"}, + {true, "IGU_BC_CAR_DROP_PKT_H"}, {false, "Reserved"}, {true, "IGU_RX_OVERSIZE_PKT_L"}, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 20e628c2bd44..42a9e73d8588 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1399,7 +1399,7 @@ static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) /* configure common error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); if (en) { @@ -1498,7 +1498,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, /* configure PPP error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], cmd, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], cmd, false); 
if (cmd == HCLGE_PPP_CMD0_INT_CMD) { @@ -1633,7 +1633,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, /* configure PPU error interrupts */ if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) { hclge_cmd_setup_basic_desc(&desc[0], cmd, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], cmd, false); if (en) { desc[0].data[0] = @@ -1718,7 +1718,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en) /* configure SSU ecc error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false); if (en) { desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN); @@ -1740,7 +1740,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en) /* configure SSU common error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false); if (en) { @@ -1963,7 +1963,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, &ae_dev->hw_err_reset_req); /* clear all main PF RAS errors */ - hclge_cmd_reuse_desc(&desc[0], false); + hclge_comm_cmd_reuse_desc(&desc[0], false); ret = hclge_cmd_send(&hdev->hw, &desc[0], num); if (ret) dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret); @@ -2036,7 +2036,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, } /* clear all PF RAS errors */ - hclge_cmd_reuse_desc(&desc[0], false); + hclge_comm_cmd_reuse_desc(&desc[0], false); ret = hclge_cmd_send(&hdev->hw, &desc[0], num); if (ret) dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret); @@ -2087,8 +2087,8 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev) true); hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD, true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); ret = hclge_cmd_send(&hdev->hw, &desc[0], 3); if (ret) { @@ -2119,7 +2119,7 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev) ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD, - HCLGE_CMD_FLAG_NEXT); + HCLGE_COMM_CMD_FLAG_NEXT); if (ret) { dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret); return ret; @@ -2235,7 +2235,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) } /* clear error status */ - hclge_cmd_reuse_desc(&desc[0], false); + hclge_comm_cmd_reuse_desc(&desc[0], false); ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); if (ret) { dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); @@ -2405,7 +2405,8 @@ static int hclge_clear_hw_msix_error(struct hclge_dev *hdev, else desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT); - desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR | + HCLGE_COMM_CMD_FLAG_IN); return hclge_cmd_send(&hdev->hw, &desc[0], bd_num); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 
c2a58101144e..24f7afacae02 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -24,6 +24,7 @@ #include "hclge_err.h" #include "hnae3.h" #include "hclge_devlink.h" +#include "hclge_comm_cmd.h" #define HCLGE_NAME "hclge" @@ -90,20 +91,20 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG, - HCLGE_NIC_CSQ_BASEADDR_H_REG, - HCLGE_NIC_CSQ_DEPTH_REG, - HCLGE_NIC_CSQ_TAIL_REG, - HCLGE_NIC_CSQ_HEAD_REG, - HCLGE_NIC_CRQ_BASEADDR_L_REG, - HCLGE_NIC_CRQ_BASEADDR_H_REG, - HCLGE_NIC_CRQ_DEPTH_REG, - HCLGE_NIC_CRQ_TAIL_REG, - HCLGE_NIC_CRQ_HEAD_REG, - HCLGE_VECTOR0_CMDQ_SRC_REG, - HCLGE_CMDQ_INTR_STS_REG, - HCLGE_CMDQ_INTR_EN_REG, - HCLGE_CMDQ_INTR_GEN_REG}; +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_CMDQ_INTR_STS_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, HCLGE_PF_OTHER_INT_REG, @@ -370,14 +371,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { }, }; -static const u8 hclge_hash_key[] = { - 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, - 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, - 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, - 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, - 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA -}; - static const u32 hclge_dfx_bd_offset_list[] = { HCLGE_DFX_BIOS_BD_OFFSET, HCLGE_DFX_SSU_0_BD_OFFSET, @@ -478,6 +471,20 @@ static const struct key_info tuple_key_info[] = { offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) }, }; +/** + * hclge_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + **/ +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) +{ + return hclge_comm_cmd_send(&hw->hw, desc, num); +} + static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -604,111 +611,6 @@ int hclge_mac_update_stats(struct hclge_dev *hdev) return hclge_mac_update_stats_defective(hdev); } -static int hclge_tqps_update_stats(struct hnae3_handle *handle) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct hnae3_queue *queue; - struct hclge_desc desc[1]; - struct hclge_tqp *tqp; - int ret, i; - - for (i = 0; i < kinfo->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclge_tqp, q); - /* command : HCLGE_OPC_QUERY_IGU_STAT */ - hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS, - true); - - desc[0].data[0] = cpu_to_le32(tqp->index); - ret = hclge_cmd_send(&hdev->hw, desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Query tqp stat fail, status = %d,queue = %d\n", - ret, i); - return ret; - } - 
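	/* The 32-bit count read back in data[1] is folded into the
	 * 64-bit software statistic for this queue below.
	 */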
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += - le32_to_cpu(desc[0].data[1]); - } - - for (i = 0; i < kinfo->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclge_tqp, q); - /* command : HCLGE_OPC_QUERY_IGU_STAT */ - hclge_cmd_setup_basic_desc(&desc[0], - HCLGE_OPC_QUERY_TX_STATS, - true); - - desc[0].data[0] = cpu_to_le32(tqp->index); - ret = hclge_cmd_send(&hdev->hw, desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Query tqp stat fail, status = %d,queue = %d\n", - ret, i); - return ret; - } - tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += - le32_to_cpu(desc[0].data[1]); - } - - return 0; -} - -static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclge_tqp *tqp; - u64 *buff = data; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; - } - - for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; - } - - return buff; -} - -static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - - /* each tqp has TX & RX two queues */ - return kinfo->num_tqps * (2); -} - -static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - u8 *buff = data; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], - struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", - tqp->index); - buff = buff + ETH_GSTRING_LEN; - } - - for (i = 0; i < kinfo->num_tqps; i++) { - struct hclge_tqp *tqp = container_of(kinfo->tqp[i], - struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", - tqp->index); - buff = buff + ETH_GSTRING_LEN; - } - - return buff; -} - static int hclge_comm_get_count(struct hclge_dev *hdev, const struct hclge_comm_stats_str strs[], u32 size) @@ -769,7 +671,7 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev) handle = &hdev->vport[0].nic; if (handle->client) { - status = hclge_tqps_update_stats(handle); + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); if (status) { dev_err(&hdev->pdev->dev, "Update TQPS stats fail, status = %d.\n", @@ -799,7 +701,7 @@ static void hclge_update_stats(struct hnae3_handle *handle, "Update MAC stats fail, status = %d.\n", status); - status = hclge_tqps_update_stats(handle); + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); if (status) dev_err(&hdev->pdev->dev, "Update TQPS stats fail, status = %d.\n", @@ -848,7 +750,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) } else if (stringset == ETH_SS_STATS) { count = hclge_comm_get_count(hdev, g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string)) + - hclge_tqps_get_sset_count(handle, stringset); + hclge_comm_tqps_get_sset_count(handle); } return count; @@ -866,7 +768,7 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, size = ARRAY_SIZE(g_mac_stats_string); p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, size, p); - p = hclge_tqps_get_strings(handle, p); + p = hclge_comm_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], @@ -900,7 +802,7 
@@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) p = hclge_comm_get_stats(hdev, g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data); - p = hclge_tqps_get_stats(handle, p); + p = hclge_comm_tqps_get_stats(handle, p); } static void hclge_get_mac_stat(struct hnae3_handle *handle, @@ -1480,7 +1382,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev) ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; - ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE; + ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; @@ -1520,7 +1422,7 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev) if (!dev_specs->rss_ind_tbl_size) dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; if (!dev_specs->rss_key_size) - dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE; + dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; if (!dev_specs->max_tm_rate) dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; if (!dev_specs->max_qset_num) @@ -1567,7 +1469,7 @@ static int hclge_query_dev_specs(struct hclge_dev *hdev) for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); - desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); } hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); @@ -1613,12 +1515,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) hdev->num_rx_desc = HCLGE_MIN_RX_DESC; } +static void hclge_init_tc_config(struct hclge_dev *hdev) +{ + unsigned int i; + + if (hdev->tc_max > HNAE3_MAX_TC || + hdev->tc_max < 1) { + dev_warn(&hdev->pdev->dev, "TC num = %u.\n", + hdev->tc_max); + hdev->tc_max = 1; + } + + /* Dev does not support DCB */ + if (!hnae3_dev_dcb_supported(hdev)) { + hdev->tc_max = 1; + hdev->pfc_max = 0; + } else { + hdev->pfc_max = hdev->tc_max; + } + + hdev->tm_info.num_tc = 1; + + /* Currently not support uncontiuous tc */ + for (i = 0; i < hdev->tm_info.num_tc; i++) + hnae3_set_bit(hdev->hw_tc_map, i, 1); + + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; +} + static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; - unsigned int i; int node, ret; ret = hclge_get_cfg(hdev, &cfg); @@ -1662,29 +1591,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); - if ((hdev->tc_max > HNAE3_MAX_TC) || - (hdev->tc_max < 1)) { - dev_warn(&hdev->pdev->dev, "TC num = %u.\n", - hdev->tc_max); - hdev->tc_max = 1; - } - - /* Dev does not support DCB */ - if (!hnae3_dev_dcb_supported(hdev)) { - hdev->tc_max = 1; - hdev->pfc_max = 0; - } else { - hdev->pfc_max = hdev->tc_max; - } - - hdev->tm_info.num_tc = 1; - - /* Currently not support uncontiuous tc */ - for (i = 0; i < hdev->tm_info.num_tc; i++) - hnae3_set_bit(hdev->hw_tc_map, i, 1); - - hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; - + hclge_init_tc_config(hdev); hclge_init_kdump_kernel_config(hdev); /* Set the affinity based on numa node */ @@ -1736,11 +1643,11 @@ static int hclge_config_gro(struct hclge_dev *hdev) static int hclge_alloc_tqps(struct hclge_dev *hdev) { - struct hclge_tqp *tqp; + struct hclge_comm_tqp *tqp; int i; 
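
The loop below also derives each queue's register window from its index: the first HCLGE_TQP_MAX_SIZE_DEV_V2 queues sit at a fixed HCLGE_TQP_REG_SIZE stride from HCLGE_TQP_REG_OFFSET, and any further queues continue at the same stride inside the extended region. The address math, pulled out as a sketch (the constants are the driver's own; the helper is illustrative):

	/* Doorbell window for queue i: base region first, extended
	 * region for queues past the V2 device limit.
	 */
	static u8 __iomem *tqp_reg_base(u8 __iomem *io_base, int i)
	{
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			return io_base + HCLGE_TQP_REG_OFFSET +
			       i * HCLGE_TQP_REG_SIZE;

		return io_base + HCLGE_TQP_REG_OFFSET +
		       HCLGE_TQP_EXT_REG_OFFSET +
		       (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * HCLGE_TQP_REG_SIZE;
	}
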
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, - sizeof(struct hclge_tqp), GFP_KERNEL); + sizeof(struct hclge_comm_tqp), GFP_KERNEL); if (!hdev->htqp) return -ENOMEM; @@ -1759,11 +1666,11 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev) * HCLGE_TQP_MAX_SIZE_DEV_V2 */ if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) - tqp->q.io_base = hdev->hw.io_base + + tqp->q.io_base = hdev->hw.hw.io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE; else - tqp->q.io_base = hdev->hw.io_base + + tqp->q.io_base = hdev->hw.hw.io_base + HCLGE_TQP_REG_OFFSET + HCLGE_TQP_EXT_REG_OFFSET + (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * @@ -1864,8 +1771,8 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, kinfo = &nic->kinfo; for (i = 0; i < vport->alloc_tqps; i++) { - struct hclge_tqp *q = - container_of(kinfo->tqp[i], struct hclge_tqp, q); + struct hclge_comm_tqp *q = + container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); bool is_pf; int ret; @@ -1885,7 +1792,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev) u16 i, num_vport; num_vport = hdev->num_req_vfs + 1; - for (i = 0; i < num_vport; i++) { + for (i = 0; i < num_vport; i++) { int ret; ret = hclge_map_tqp_to_vport(hdev, vport); @@ -1907,7 +1814,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->pdev = hdev->pdev; nic->ae_algo = &ae_algo; nic->numa_node_mask = hdev->numa_node_mask; - nic->kinfo.io_base = hdev->hw.io_base; + nic->kinfo.io_base = hdev->hw.hw.io_base; ret = hclge_knic_setup(vport, num_tqps, hdev->num_tx_desc, hdev->num_rx_desc); @@ -2416,9 +2323,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, /* The first descriptor set the NEXT bit to 1 */ if (i == 0) - desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); else - desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; @@ -2461,9 +2368,9 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev, /* The first descriptor set the NEXT bit to 1 */ if (i == 0) - desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); else - desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; @@ -2592,8 +2499,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) roce->rinfo.base_vector = hdev->num_nic_msi; roce->rinfo.netdev = nic->kinfo.netdev; - roce->rinfo.roce_io_base = hdev->hw.io_base; - roce->rinfo.roce_mem_base = hdev->hw.mem_base; + roce->rinfo.roce_io_base = hdev->hw.hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; @@ -2653,11 +2560,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed) return duplex; } +static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, + {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, + {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, + {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, + {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, + {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, + {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, + {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, + {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, +}; + +static int hclge_convert_to_fw_speed(u32 speed_drv, u32 
*speed_fw) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { + if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { + *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; + return 0; + } + } + + return -EINVAL; +} + static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, u8 duplex) { struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; + u32 speed_fw; int ret; req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; @@ -2667,48 +2601,14 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, if (duplex) hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); - switch (speed) { - case HCLGE_MAC_SPEED_10M: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M); - break; - case HCLGE_MAC_SPEED_100M: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M); - break; - case HCLGE_MAC_SPEED_1G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G); - break; - case HCLGE_MAC_SPEED_10G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G); - break; - case HCLGE_MAC_SPEED_25G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G); - break; - case HCLGE_MAC_SPEED_40G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G); - break; - case HCLGE_MAC_SPEED_50G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G); - break; - case HCLGE_MAC_SPEED_100G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G); - break; - case HCLGE_MAC_SPEED_200G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G); - break; - default: + ret = hclge_convert_to_fw_speed(speed, &speed_fw); + if (ret) { dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); - return -EINVAL; + return ret; } + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, + speed_fw); hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 1); @@ -2933,16 +2833,20 @@ static int hclge_mac_init(struct hclge_dev *hdev) static void hclge_mbx_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && - !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) + !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { + hdev->last_mbx_scheduled = jiffies; mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } } static void hclge_reset_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && - !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { + hdev->last_rst_scheduled = jiffies; mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } } static void hclge_errhand_task_schedule(struct hclge_dev *hdev) @@ -3237,7 +3141,7 @@ static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, true); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, true); @@ -3294,7 +3198,7 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle, 
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, false); @@ -3501,7 +3405,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); hdev->rst_stats.imp_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; @@ -3509,7 +3413,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { dev_info(&hdev->pdev->dev, "global reset interrupt\n"); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); hdev->rst_stats.global_rst_cnt++; @@ -3643,7 +3547,7 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) vector->vector_irq = pci_irq_vector(hdev->pdev, 0); - vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; + vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; hdev->vector_status[0] = 0; hdev->num_msi_left -= 1; @@ -3827,10 +3731,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) static void hclge_mailbox_service_task(struct hclge_dev *hdev) { if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || - test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) || + test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) return; + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "mbx service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), + smp_processor_id()); + hclge_mbx_handler(hdev); clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); @@ -3865,7 +3776,7 @@ static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) return; } msleep(HCLGE_PF_RESET_SYNC_TIME); - hclge_cmd_reuse_desc(&desc, true); + hclge_comm_cmd_reuse_desc(&desc, true); } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); @@ -4022,13 +3933,13 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) { u32 reg_val; - reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG); + reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); if (enable) - reg_val |= HCLGE_NIC_SW_RST_RDY; + reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; else - reg_val &= ~HCLGE_NIC_SW_RST_RDY; + reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; - hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); + hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); } static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) @@ -4065,9 +3976,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) /* After performaning pf reset, it is not necessary to do the * mailbox handling or send any command to firmware, because * any mailbox handling or command to firmware is only valid - * after hclge_cmd_init is called. + * after hclge_comm_cmd_init is called. 
*/ - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); hdev->rst_stats.pf_rst_cnt++; break; case HNAE3_FLR_RESET: @@ -4480,6 +4391,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev) if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) return; + if (time_is_before_jiffies(hdev->last_rst_scheduled + + HCLGE_RESET_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "reset service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), + smp_processor_id()); + down(&hdev->reset_sem); set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); @@ -4614,11 +4532,11 @@ static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, /* need an extend offset to config vector >= 64 */ if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) - vector_info->io_addr = hdev->hw.io_base + + vector_info->io_addr = hdev->hw.hw.io_base + HCLGE_VECTOR_REG_BASE + (idx - 1) * HCLGE_VECTOR_REG_OFFSET; else - vector_info->io_addr = hdev->hw.io_base + + vector_info->io_addr = hdev->hw.hw.io_base + HCLGE_VECTOR_EXT_REG_BASE + (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * HCLGE_VECTOR_REG_OFFSET_H + @@ -4688,334 +4606,43 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector) return 0; } -static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) -{ - return HCLGE_RSS_KEY_SIZE; -} - -static int hclge_set_rss_algo_key(struct hclge_dev *hdev, - const u8 hfunc, const u8 *key) -{ - struct hclge_rss_config_cmd *req; - unsigned int key_offset = 0; - struct hclge_desc desc; - int key_counts; - int key_size; - int ret; - - key_counts = HCLGE_RSS_KEY_SIZE; - req = (struct hclge_rss_config_cmd *)desc.data; - - while (key_counts) { - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, - false); - - req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); - req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); - - key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts); - memcpy(req->hash_key, - key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); - - key_counts -= key_size; - key_offset++; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Configure RSS config fail, status = %d\n", - ret); - return ret; - } - } - return 0; -} - -static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir) -{ - struct hclge_rss_indirection_table_cmd *req; - struct hclge_desc desc; - int rss_cfg_tbl_num; - u8 rss_msb_oft; - u8 rss_msb_val; - int ret; - u16 qid; - int i; - u32 j; - - req = (struct hclge_rss_indirection_table_cmd *)desc.data; - rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size / - HCLGE_RSS_CFG_TBL_SIZE; - - for (i = 0; i < rss_cfg_tbl_num; i++) { - hclge_cmd_setup_basic_desc - (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); - - req->start_table_index = - cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); - req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); - for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) { - qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; - req->rss_qid_l[j] = qid & 0xff; - rss_msb_oft = - j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE; - rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) << - (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE); - req->rss_qid_h[rss_msb_oft] |= rss_msb_val; - } - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Configure rss indir table fail,status = %d\n", - ret); - return ret; - } - } - return 0; -} - -static int hclge_set_rss_tc_mode(struct 
hclge_dev *hdev, u16 *tc_valid, - u16 *tc_size, u16 *tc_offset) -{ - struct hclge_rss_tc_mode_cmd *req; - struct hclge_desc desc; - int ret; - int i; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); - req = (struct hclge_rss_tc_mode_cmd *)desc.data; - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - u16 mode = 0; - - hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); - hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, - HCLGE_RSS_TC_SIZE_S, tc_size[i]); - hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B, - tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1); - hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, - HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); - - req->rss_tc_mode[i] = cpu_to_le16(mode); - } - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Configure rss tc mode fail, status = %d\n", ret); - - return ret; -} - -static void hclge_get_rss_type(struct hclge_vport *vport) -{ - if (vport->rss_tuple_sets.ipv4_tcp_en || - vport->rss_tuple_sets.ipv4_udp_en || - vport->rss_tuple_sets.ipv4_sctp_en || - vport->rss_tuple_sets.ipv6_tcp_en || - vport->rss_tuple_sets.ipv6_udp_en || - vport->rss_tuple_sets.ipv6_sctp_en) - vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; - else if (vport->rss_tuple_sets.ipv4_fragment_en || - vport->rss_tuple_sets.ipv6_fragment_en) - vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; - else - vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; -} - -static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) -{ - struct hclge_rss_input_tuple_cmd *req; - struct hclge_desc desc; - int ret; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); - - req = (struct hclge_rss_input_tuple_cmd *)desc.data; - - /* Get the tuple cfg from pf */ - req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; - req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; - req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; - req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; - req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; - req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; - req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; - req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; - hclge_get_rss_type(&hdev->vport[0]); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Configure rss input fail, status = %d\n", ret); - return ret; -} - static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, u8 *hfunc) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hclge_vport *vport = hclge_get_vport(handle); - int i; + struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; - /* Get hash algorithm */ - if (hfunc) { - switch (vport->rss_algo) { - case HCLGE_RSS_HASH_ALGO_TOEPLITZ: - *hfunc = ETH_RSS_HASH_TOP; - break; - case HCLGE_RSS_HASH_ALGO_SIMPLE: - *hfunc = ETH_RSS_HASH_XOR; - break; - default: - *hfunc = ETH_RSS_HASH_UNKNOWN; - break; - } - } - - /* Get the RSS Key required by the user */ - if (key) - memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); + hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); - /* Get indirect table */ - if (indir) - for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) - indir[i] = vport->rss_indirection_tbl[i]; + hclge_comm_get_rss_indir_tbl(rss_cfg, indir, + ae_dev->dev_specs.rss_ind_tbl_size); return 0; } -static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc, - u8 *hash_algo) -{ - switch (hfunc) { - case 
ETH_RSS_HASH_TOP: - *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - return 0; - case ETH_RSS_HASH_XOR: - *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; - return 0; - case ETH_RSS_HASH_NO_CHANGE: - *hash_algo = vport->rss_algo; - return 0; - default: - return -EINVAL; - } -} - static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, const u8 *key, const u8 hfunc) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - u8 hash_algo; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; int ret, i; - ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo); + ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); if (ret) { dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); return ret; } - /* Set the RSS Hash Key if specififed by the user */ - if (key) { - ret = hclge_set_rss_algo_key(hdev, hash_algo, key); - if (ret) - return ret; - - /* Update the shadow RSS key with user specified qids */ - memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); - } else { - ret = hclge_set_rss_algo_key(hdev, hash_algo, - vport->rss_hash_key); - if (ret) - return ret; - } - vport->rss_algo = hash_algo; - /* Update the shadow RSS table with user specified qids */ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) - vport->rss_indirection_tbl[i] = indir[i]; + rss_cfg->rss_indirection_tbl[i] = indir[i]; /* Update the hardware */ - return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); -} - -static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) -{ - u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; - - if (nfc->data & RXH_L4_B_2_3) - hash_sets |= HCLGE_D_PORT_BIT; - else - hash_sets &= ~HCLGE_D_PORT_BIT; - - if (nfc->data & RXH_IP_SRC) - hash_sets |= HCLGE_S_IP_BIT; - else - hash_sets &= ~HCLGE_S_IP_BIT; - - if (nfc->data & RXH_IP_DST) - hash_sets |= HCLGE_D_IP_BIT; - else - hash_sets &= ~HCLGE_D_IP_BIT; - - if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) - hash_sets |= HCLGE_V_TAG_BIT; - - return hash_sets; -} - -static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport, - struct ethtool_rxnfc *nfc, - struct hclge_rss_input_tuple_cmd *req) -{ - struct hclge_dev *hdev = vport->back; - u8 tuple_sets; - - req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; - req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; - req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; - req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; - req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; - req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; - req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; - req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; - - tuple_sets = hclge_get_rss_hash_bits(nfc); - switch (nfc->flow_type) { - case TCP_V4_FLOW: - req->ipv4_tcp_en = tuple_sets; - break; - case TCP_V6_FLOW: - req->ipv6_tcp_en = tuple_sets; - break; - case UDP_V4_FLOW: - req->ipv4_udp_en = tuple_sets; - break; - case UDP_V6_FLOW: - req->ipv6_udp_en = tuple_sets; - break; - case SCTP_V4_FLOW: - req->ipv4_sctp_en = tuple_sets; - break; - case SCTP_V6_FLOW: - if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && - (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) - return -EINVAL; - - req->ipv6_sctp_en = tuple_sets; - break; - case IPV4_FLOW: - req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - break; - case IPV6_FLOW: - req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - break; - default: - return 
-EINVAL; - } - - return 0; + return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, + rss_cfg->rss_indirection_tbl); } static int hclge_set_rss_tuple(struct hnae3_handle *handle, @@ -5023,92 +4650,20 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_rss_input_tuple_cmd *req; - struct hclge_desc desc; int ret; - if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) - return -EINVAL; - - req = (struct hclge_rss_input_tuple_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); - - ret = hclge_init_rss_tuple_cmd(vport, nfc, req); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to init rss tuple cmd, ret = %d\n", ret); - return ret; - } - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); + ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, + &hdev->rss_cfg, nfc); if (ret) { dev_err(&hdev->pdev->dev, - "Set rss tuple fail, status = %d\n", ret); + "failed to set rss tuple, ret = %d.\n", ret); return ret; } - vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; - vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; - vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; - vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; - vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; - vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; - vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; - vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; - hclge_get_rss_type(vport); - return 0; -} - -static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type, - u8 *tuple_sets) -{ - switch (flow_type) { - case TCP_V4_FLOW: - *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; - break; - case UDP_V4_FLOW: - *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; - break; - case TCP_V6_FLOW: - *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; - break; - case UDP_V6_FLOW: - *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; - break; - case SCTP_V4_FLOW: - *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; - break; - case SCTP_V6_FLOW: - *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; - break; - case IPV4_FLOW: - case IPV6_FLOW: - *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; - break; - default: - return -EINVAL; - } - + hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets); return 0; } -static u64 hclge_convert_rss_tuple(u8 tuple_sets) -{ - u64 tuple_data = 0; - - if (tuple_sets & HCLGE_D_PORT_BIT) - tuple_data |= RXH_L4_B_2_3; - if (tuple_sets & HCLGE_S_PORT_BIT) - tuple_data |= RXH_L4_B_0_1; - if (tuple_sets & HCLGE_D_IP_BIT) - tuple_data |= RXH_IP_DST; - if (tuple_sets & HCLGE_S_IP_BIT) - tuple_data |= RXH_IP_SRC; - - return tuple_data; -} - static int hclge_get_rss_tuple(struct hnae3_handle *handle, struct ethtool_rxnfc *nfc) { @@ -5118,11 +4673,12 @@ static int hclge_get_rss_tuple(struct hnae3_handle *handle, nfc->data = 0; - ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets); + ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, + &tuple_sets); if (ret || !tuple_sets) return ret; - nfc->data = hclge_convert_rss_tuple(tuple_sets); + nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); return 0; } @@ -5175,78 +4731,35 @@ static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) tc_offset[i] = tc_info->tqp_offset[i]; } - return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, 
tc_offset, tc_valid, + tc_size); } int hclge_rss_init_hw(struct hclge_dev *hdev) { - struct hclge_vport *vport = hdev->vport; - u16 *rss_indir = vport[0].rss_indirection_tbl; - u8 *key = vport[0].rss_hash_key; - u8 hfunc = vport[0].rss_algo; + u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; + u8 *key = hdev->rss_cfg.rss_hash_key; + u8 hfunc = hdev->rss_cfg.rss_algo; int ret; - ret = hclge_set_rss_indir_table(hdev, rss_indir); + ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, + rss_indir); if (ret) return ret; - ret = hclge_set_rss_algo_key(hdev, hfunc, key); + ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); if (ret) return ret; - ret = hclge_set_rss_input_tuple(hdev); + ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic, + &hdev->hw.hw, true, + &hdev->rss_cfg); if (ret) return ret; return hclge_init_rss_tc_mode(hdev); } -void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) -{ - struct hclge_vport *vport = &hdev->vport[0]; - int i; - - for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) - vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size; -} - -static int hclge_rss_init_cfg(struct hclge_dev *hdev) -{ - u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; - int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - struct hclge_vport *vport = &hdev->vport[0]; - u16 *rss_ind_tbl; - - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) - rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; - - vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; - vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport->rss_tuple_sets.ipv6_sctp_en = - hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 
- HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT : - HCLGE_RSS_INPUT_TUPLE_SCTP; - vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - - vport->rss_algo = rss_algo; - - rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, - sizeof(*rss_ind_tbl), GFP_KERNEL); - if (!rss_ind_tbl) - return -ENOMEM; - - vport->rss_indirection_tbl = rss_ind_tbl; - memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE); - - hclge_rss_indir_init_cfg(hdev); - - return 0; -} - int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, struct hnae3_ring_chain_node *ring_chain) @@ -5256,7 +4769,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, struct hclge_desc desc; struct hclge_ctrl_vector_chain_cmd *req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; - enum hclge_cmd_status status; + enum hclge_comm_cmd_status status; enum hclge_opcode_type op; u16 tqp_type_and_id; int i; @@ -5886,9 +5399,9 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, int ret; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); - desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; @@ -6790,7 +6303,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, if (vf > hdev->num_req_vfs) { dev_err(&hdev->pdev->dev, "Error: vf id (%u) should be less than %u\n", - vf - 1, hdev->num_req_vfs); + vf - 1U, hdev->num_req_vfs); return -EINVAL; } @@ -6800,7 +6313,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, if (ring >= tqps) { dev_err(&hdev->pdev->dev, "Error: queue id (%u) > max tqp num (%u)\n", - ring, tqps - 1); + ring, tqps - 1U); return -EINVAL; } @@ -7161,6 +6674,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, } } +static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, + u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + } + + return NULL; +} + +static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } +} + static int hclge_get_fd_rule_info(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { @@ -7168,7 +6712,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, struct hclge_fd_rule *rule = NULL; struct hclge_dev *hdev = vport->back; struct ethtool_rx_flow_spec *fs; - struct hlist_node *node2; if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; @@ -7177,14 +6720,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, spin_lock_bh(&hdev->fd_rule_lock); - hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { - if (rule->location >= fs->location) - break; - } - - if (!rule || fs->location != rule->location) { + rule = 
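[Editor's note] hclge_get_fd_rule() above relies on the flow-director rule list being kept sorted by location, so the walk can bail out early instead of scanning the whole list. A small standalone C model of that early-exit lookup; plain pointers stand in for the kernel's hlist, and the values are made up:

#include <stdio.h>

struct fd_rule {
	unsigned int location;
	struct fd_rule *next;
};

/* early-exit lookup in a list sorted by ascending location */
static struct fd_rule *get_fd_rule(struct fd_rule *head, unsigned int location)
{
	struct fd_rule *rule;

	for (rule = head; rule; rule = rule->next) {
		if (rule->location == location)
			return rule;
		if (rule->location > location)
			return NULL;	/* sorted: nothing further can match */
	}
	return NULL;
}

int main(void)
{
	struct fd_rule r2 = { 7, NULL };
	struct fd_rule r1 = { 3, &r2 };

	printf("find 7: %s\n", get_fd_rule(&r1, 7) ? "hit" : "miss");
	printf("find 5: %s\n", get_fd_rule(&r1, 5) ? "hit" : "miss");
	return 0;
}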
hclge_get_fd_rule(hdev, fs->location); + if (!rule) { spin_unlock_bh(&hdev->fd_rule_lock); - return -ENOENT; } @@ -7222,16 +6760,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, hclge_fd_get_ext_info(fs, rule); - if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { - fs->ring_cookie = RX_CLS_FLOW_DISC; - } else { - u64 vf_id; - - fs->ring_cookie = rule->queue_id; - vf_id = rule->vf_id; - vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; - fs->ring_cookie |= vf_id; - } + hclge_fd_get_ring_cookie(fs, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -7776,7 +7305,7 @@ static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); } static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) @@ -7866,7 +7395,7 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, } /* modify and write new config parameter */ - hclge_cmd_reuse_desc(&desc, false); + hclge_comm_cmd_reuse_desc(&desc, false); req->switch_param = (req->switch_param & param_mask) | switch_param; req->param_mask = param_mask; @@ -7960,7 +7489,7 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) /* 3 Config mac work mode with loopback flag * and its original configure parameters */ - hclge_cmd_reuse_desc(&desc, false); + hclge_comm_cmd_reuse_desc(&desc, false); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, @@ -7968,16 +7497,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, - enum hnae3_loop loop_mode) +static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) { -#define HCLGE_COMMON_LB_RETRY_MS 10 -#define HCLGE_COMMON_LB_RETRY_NUM 100 - struct hclge_common_lb_cmd *req; struct hclge_desc desc; - int ret, i = 0; u8 loop_mode_b; + int ret; req = (struct hclge_common_lb_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); @@ -7994,23 +7520,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, break; default: dev_err(&hdev->pdev->dev, - "unsupported common loopback mode %d\n", loop_mode); + "unsupported loopback mode %d\n", loop_mode); return -ENOTSUPP; } - if (en) { + req->mask = loop_mode_b; + if (en) req->enable = loop_mode_b; - req->mask = loop_mode_b; - } else { - req->mask = loop_mode_b; - } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, - "common loopback set fail, ret = %d\n", ret); - return ret; - } + "failed to send loopback cmd, loop_mode = %d, ret = %d\n", + loop_mode, ret); + + return ret; +} + +static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) +{ +#define HCLGE_COMMON_LB_RETRY_MS 10 +#define HCLGE_COMMON_LB_RETRY_NUM 100 + + struct hclge_common_lb_cmd *req; + struct hclge_desc desc; + u32 i = 0; + int ret; + + req = (struct hclge_common_lb_cmd *)desc.data; do { msleep(HCLGE_COMMON_LB_RETRY_MS); @@ -8019,20 +7556,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "common loopback get, ret = %d\n", ret); + "failed to get loopback done status, ret = %d\n", + ret); return ret; } } while (++i < HCLGE_COMMON_LB_RETRY_NUM && !(req->result & 
HCLGE_CMD_COMMON_LB_DONE_B)); if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { - dev_err(&hdev->pdev->dev, "common loopback set timeout\n"); + dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); return -EBUSY; } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { - dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n"); + dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); return -EIO; } - return ret; + + return 0; +} + +static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, + enum hnae3_loop loop_mode) +{ + int ret; + + ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); + if (ret) + return ret; + + return hclge_cfg_common_loopback_wait(hdev); } static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, @@ -8213,22 +7764,6 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev) HNAE3_LOOP_PARALLEL_SERDES); } -static void hclge_reset_tqp_stats(struct hnae3_handle *handle) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hnae3_knic_private_info *kinfo; - struct hnae3_queue *queue; - struct hclge_tqp *tqp; - int i; - - kinfo = &vport->nic.kinfo; - for (i = 0; i < kinfo->num_tqps; i++) { - queue = handle->kinfo.tqp[i]; - tqp = container_of(queue, struct hclge_tqp, q); - memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); - } -} - static void hclge_flush_link_update(struct hclge_dev *hdev) { #define HCLGE_FLUSH_LINK_TIMEOUT 100000 @@ -8270,7 +7805,7 @@ static int hclge_ae_start(struct hnae3_handle *handle) hdev->hw.mac.link = 0; /* reset tqp stats */ - hclge_reset_tqp_stats(handle); + hclge_comm_reset_tqp_stats(handle); hclge_mac_start_phy(hdev); @@ -8308,7 +7843,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle) hclge_mac_stop_phy(hdev); /* reset tqp stats */ - hclge_reset_tqp_stats(handle); + hclge_comm_reset_tqp_stats(handle); hclge_update_link_status(hdev); } @@ -8511,14 +8046,14 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); if (is_mc) { - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); memcpy(desc[0].data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_MAC_VLAN_ADD, true); - desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_MAC_VLAN_ADD, true); @@ -8568,12 +8103,12 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, resp_code, HCLGE_MAC_VLAN_ADD); } else { - hclge_cmd_reuse_desc(&mc_desc[0], false); - mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_reuse_desc(&mc_desc[1], false); - mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - hclge_cmd_reuse_desc(&mc_desc[2], false); - mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); + hclge_comm_cmd_reuse_desc(&mc_desc[0], false); + mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_reuse_desc(&mc_desc[1], false); + mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_reuse_desc(&mc_desc[2], false); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT); memcpy(mc_desc[0].data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); @@ -8743,6 +8278,7 @@ int hclge_update_mac_list(struct hclge_vport *vport, enum HCLGE_MAC_ADDR_TYPE mac_type, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct 
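[Editor's note] The loopback refactor above splits one long function into a command-send step and a bounded polling loop. A userspace sketch of the wait half follows; read_result() is a stub standing in for the firmware status query, and the bit values are arbitrary:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_MS	10	/* stand-in for HCLGE_COMMON_LB_RETRY_MS */
#define RETRY_NUM	100	/* stand-in for HCLGE_COMMON_LB_RETRY_NUM */
#define DONE_BIT	0x1
#define SUCCESS_BIT	0x2

/* stub firmware query: pretends loopback setup finishes on poll 3 */
static int read_result(void)
{
	static int calls;

	return ++calls >= 3 ? (DONE_BIT | SUCCESS_BIT) : 0;
}

static int wait_loopback_done(void)
{
	int result;
	int i = 0;

	do {
		usleep(RETRY_MS * 1000);
		result = read_result();
	} while (++i < RETRY_NUM && !(result & DONE_BIT));

	if (!(result & DONE_BIT))
		return -EBUSY;	/* timed out waiting for firmware */
	if (!(result & SUCCESS_BIT))
		return -EIO;	/* firmware finished but reported failure */
	return 0;
}

int main(void)
{
	printf("wait_loopback_done() = %d\n", wait_loopback_done());
	return 0;
}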
hclge_dev *hdev = vport->back; struct hclge_mac_node *mac_node; struct list_head *list; @@ -8767,9 +8303,10 @@ int hclge_update_mac_list(struct hclge_vport *vport, /* if this address is never added, unnecessary to delete */ if (state == HCLGE_MAC_TO_DEL) { spin_unlock_bh(&vport->mac_list_lock); + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "failed to delete address %pM from mac list\n", - addr); + "failed to delete address %s from mac list\n", + format_mac_addr); return -ENOENT; } @@ -8802,6 +8339,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle, int hclge_add_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc; @@ -8812,9 +8350,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, if (is_zero_ether_addr(addr) || is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", - addr, is_zero_ether_addr(addr), + "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n", + format_mac_addr, is_zero_ether_addr(addr), is_broadcast_ether_addr(addr), is_multicast_ether_addr(addr)); return -EINVAL; @@ -8871,6 +8410,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle, int hclge_rm_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; int ret; @@ -8879,8 +8419,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, if (is_zero_ether_addr(addr) || is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { - dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n", - addr); + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } @@ -8911,6 +8452,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle, int hclge_add_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; @@ -8919,9 +8461,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, /* mac addr check */ if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "Add mc mac err! invalid mac:%pM.\n", - addr); + "Add mc mac err! invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } memset(&req, 0, sizeof(req)); @@ -8973,16 +8516,18 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; - enum hclge_cmd_status status; + enum hclge_comm_cmd_status status; struct hclge_desc desc[3]; /* mac addr check */ if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_dbg(&hdev->pdev->dev, - "Remove mc mac err! invalid mac:%pM.\n", - addr); + "Remove mc mac err! 
invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } @@ -9422,16 +8967,18 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, u8 *mac_addr) { struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; vport = hclge_get_vf_vport(hdev, vf); if (!vport) return -EINVAL; + hnae3_format_mac_addr(format_mac_addr, mac_addr); if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { dev_info(&hdev->pdev->dev, - "Specified MAC(=%pM) is same as before, no change committed!\n", - mac_addr); + "Specified MAC(=%s) is same as before, no change committed!\n", + format_mac_addr); return 0; } @@ -9439,13 +8986,13 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { dev_info(&hdev->pdev->dev, - "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", - vf, mac_addr); + "MAC of VF %d has been set to %s, and it will be reinitialized!\n", + vf, format_mac_addr); return hclge_inform_reset_assert_to_vf(vport); } - dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n", - vf, mac_addr); + dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", + vf, format_mac_addr); return 0; } @@ -9549,6 +9096,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, { const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; unsigned char *old_addr = NULL; int ret; @@ -9557,9 +9105,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, if (is_zero_ether_addr(new_addr) || is_broadcast_ether_addr(new_addr) || is_multicast_ether_addr(new_addr)) { + hnae3_format_mac_addr(format_mac_addr, new_addr); dev_err(&hdev->pdev->dev, - "change uc mac err! invalid mac: %pM.\n", - new_addr); + "change uc mac err! invalid mac: %s.\n", + format_mac_addr); return -EINVAL; } @@ -9577,9 +9126,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, spin_lock_bh(&vport->mac_list_lock); ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); if (ret) { + hnae3_format_mac_addr(format_mac_addr, new_addr); dev_err(&hdev->pdev->dev, - "failed to change the mac addr:%pM, ret = %d\n", - new_addr, ret); + "failed to change the mac addr:%s, ret = %d\n", + format_mac_addr, ret); spin_unlock_bh(&vport->mac_list_lock); if (!is_first) @@ -9677,20 +9227,20 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, - "failed to get vlan filter config, ret = %d.\n", ret); + dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", + vf_id, ret); return ret; } /* modify and write new config parameter */ - hclge_cmd_reuse_desc(&desc, false); + hclge_comm_cmd_reuse_desc(&desc, false); req->vlan_fe = filter_en ? 
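[Editor's note] Several hunks above replace raw %pM prints with hnae3_format_mac_addr(), which logs a partially masked address instead of the full MAC. A rough userspace approximation follows; the exact mask layout here is an assumption for illustration only, the real helper lives in hnae3.h:

#include <stdint.h>
#include <stdio.h>

#define FORMAT_MAC_ADDR_LEN 18	/* "xx:**:**:**:xx:xx" plus NUL */

/* keep the first and last octets visible, hide the middle ones
 * (assumed layout, chosen for illustration)
 */
static void format_mac_addr(char *buf, const uint8_t *mac)
{
	snprintf(buf, FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x",
		 mac[0], mac[4], mac[5]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	char buf[FORMAT_MAC_ADDR_LEN];

	format_mac_addr(buf, mac);
	printf("invalid mac: %s\n", buf);	/* 02:**:**:**:44:55 */
	return 0;
}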
(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) - dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n", - ret); + dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", + vf_id, ret); return ret; } @@ -9809,7 +9359,7 @@ static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_VLAN_FILTER_VF_CFG, false); - desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); vf_byte_off = vfid / 8; vf_byte_val = 1 << (vfid % 8); @@ -9936,6 +9486,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, return ret; } +static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, + u16 vlan_id, bool is_kill) +{ + /* vlan 0 may be added twice when 8021q module is enabled */ + if (!is_kill && !vlan_id && + test_bit(vport_id, hdev->vlan_table[vlan_id])) + return false; + + if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Add port vlan failed, vport %u is already in vlan %u\n", + vport_id, vlan_id); + return false; + } + + if (is_kill && + !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_warn(&hdev->pdev->dev, + "Delete port vlan failed, vport %u is not in vlan %u\n", + vport_id, vlan_id); + return false; + } + + return true; +} + static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, u16 vport_id, u16 vlan_id, bool is_kill) @@ -9957,26 +9533,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return ret; } - /* vlan 0 may be added twice when 8021q module is enabled */ - if (!is_kill && !vlan_id && - test_bit(vport_id, hdev->vlan_table[vlan_id])) + if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) return 0; - if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { - dev_err(&hdev->pdev->dev, - "Add port vlan failed, vport %u is already in vlan %u\n", - vport_id, vlan_id); - return -EINVAL; - } - - if (is_kill && - !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { - dev_err(&hdev->pdev->dev, - "Delete port vlan failed, vport %u is not in vlan %u\n", - vport_id, vlan_id); - return -EINVAL; - } - for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) vport_num++; @@ -10168,67 +9727,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) return status; } -static int hclge_init_vlan_config(struct hclge_dev *hdev) +static int hclge_init_vlan_filter(struct hclge_dev *hdev) { -#define HCLGE_DEF_VLAN_TYPE 0x8100 - - struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - /* for revision 0x21, vf vlan filter is per function */ - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - ret = hclge_set_vlan_filter_ctrl(hdev, - HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, - true, - vport->vport_id); - if (ret) - return ret; - vport->cur_vlan_fltr_en = true; - } + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true, 0); - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true, - 0); - if (ret) - return ret; - } else { + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + 
vport = &hdev->vport[i]; ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS_V1_B, - true, 0); + HCLGE_FILTER_FE_EGRESS, true, + vport->vport_id); if (ret) return ret; + vport->cur_vlan_fltr_en = true; } - hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true, 0); +} - ret = hclge_set_vlan_protocol_type(hdev); - if (ret) - return ret; +static int hclge_init_vlan_type(struct hclge_dev *hdev) +{ + hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; - for (i = 0; i < hdev->num_alloc_vport; i++) { - u16 vlan_tag; - u8 qos; + return hclge_set_vlan_protocol_type(hdev); +} +static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) +{ + struct hclge_port_base_vlan_config *cfg; + struct hclge_vport *vport; + int ret; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - qos = vport->port_base_vlan_cfg.vlan_info.qos; + cfg = &vport->port_base_vlan_cfg; - ret = hclge_vlan_offload_cfg(vport, - vport->port_base_vlan_cfg.state, - vlan_tag, qos); + ret = hclge_vlan_offload_cfg(vport, cfg->state, + cfg->vlan_info.vlan_tag, + cfg->vlan_info.qos); if (ret) return ret; } + return 0; +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + int ret; + + ret = hclge_init_vlan_filter(hdev); + if (ret) + return ret; + + ret = hclge_init_vlan_type(hdev); + if (ret) + return ret; + + ret = hclge_init_vport_vlan_offload(hdev); + if (ret) + return ret; return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } @@ -10485,12 +10057,41 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, return false; } +static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), + vport->vport_id, new_info->vlan_tag, + false); + if (ret) + return ret; + + /* remove old VLAN tag */ + if (old_info->vlan_tag == 0) + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, + true, 0); + else + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + old_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vport%u port base vlan %u, ret = %d.\n", + vport->vport_id, old_info->vlan_tag, ret); + + return ret; +} + int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info) { struct hnae3_handle *nic = &vport->nic; struct hclge_vlan_info *old_vlan_info; - struct hclge_dev *hdev = vport->back; int ret; old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; @@ -10503,38 +10104,12 @@ int 
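[Editor's note] hclge_need_update_port_vlan() above turns duplicate adds and spurious deletes of a port VLAN into warnings that skip the hardware update, rather than hard -EINVAL failures. A self-contained model using one vport bitmap per VLAN; plain bit operations stand in for the kernel's test_and_set_bit()/test_and_clear_bit():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID 4096

static uint64_t vlan_table[VLAN_N_VID];	/* one vport bit per VLAN */

static bool need_update_port_vlan(unsigned int vport_id, unsigned int vlan_id,
				  bool is_kill)
{
	uint64_t bit = 1ULL << vport_id;

	/* vlan 0 may be added twice when the 8021q module is enabled */
	if (!is_kill && !vlan_id && (vlan_table[vlan_id] & bit))
		return false;

	if (!is_kill && (vlan_table[vlan_id] & bit)) {
		fprintf(stderr, "vport %u is already in vlan %u\n",
			vport_id, vlan_id);
		return false;
	}

	if (is_kill && !(vlan_table[vlan_id] & bit)) {
		fprintf(stderr, "vport %u is not in vlan %u\n",
			vport_id, vlan_id);
		return false;
	}

	/* membership really changes, so the caller must update hardware */
	if (is_kill)
		vlan_table[vlan_id] &= ~bit;
	else
		vlan_table[vlan_id] |= bit;
	return true;
}

int main(void)
{
	printf("add:    %d\n", need_update_port_vlan(1, 10, false));	/* 1 */
	printf("re-add: %d\n", need_update_port_vlan(1, 10, false));	/* 0 */
	printf("del:    %d\n", need_update_port_vlan(1, 10, true));	/* 1 */
	printf("re-del: %d\n", need_update_port_vlan(1, 10, true));	/* 0 */
	return 0;
}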
hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) goto out; - if (state == HNAE3_PORT_BASE_VLAN_MODIFY) { - /* add new VLAN tag */ - ret = hclge_set_vlan_filter_hw(hdev, - htons(vlan_info->vlan_proto), - vport->vport_id, - vlan_info->vlan_tag, - false); - if (ret) - return ret; - - /* remove old VLAN tag */ - if (old_vlan_info->vlan_tag == 0) - ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, - true, 0); - else - ret = hclge_set_vlan_filter_hw(hdev, - htons(ETH_P_8021Q), - vport->vport_id, - old_vlan_info->vlan_tag, - true); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to clear vport%u port base vlan %u, ret = %d.\n", - vport->vport_id, old_vlan_info->vlan_tag, ret); - return ret; - } - - goto out; - } - - ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, - old_vlan_info); + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) + ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, + old_vlan_info); + else + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); if (ret) return ret; @@ -10881,11 +10456,11 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) { + struct hclge_comm_tqp *tqp; struct hnae3_queue *queue; - struct hclge_tqp *tqp; queue = handle->kinfo.tqp[queue_id]; - tqp = container_of(queue, struct hclge_tqp, q); + tqp = container_of(queue, struct hclge_comm_tqp, q); return tqp->index; } @@ -11442,10 +11017,11 @@ static int hclge_dev_mem_map(struct hclge_dev *hdev) if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) return 0; - hw->mem_base = devm_ioremap_wc(&pdev->dev, - pci_resource_start(pdev, HCLGE_MEM_BAR), - pci_resource_len(pdev, HCLGE_MEM_BAR)); - if (!hw->mem_base) { + hw->hw.mem_base = + devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGE_MEM_BAR), + pci_resource_len(pdev, HCLGE_MEM_BAR)); + if (!hw->hw.mem_base) { dev_err(&pdev->dev, "failed to map device memory\n"); return -EFAULT; } @@ -11484,8 +11060,8 @@ static int hclge_pci_init(struct hclge_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->io_base = pcim_iomap(pdev, 2, 0); - if (!hw->io_base) { + hw->hw.io_base = pcim_iomap(pdev, 2, 0); + if (!hw->hw.io_base) { dev_err(&pdev->dev, "Can't map configuration register space\n"); ret = -ENOMEM; goto err_clr_master; @@ -11500,7 +11076,7 @@ static int hclge_pci_init(struct hclge_dev *hdev) return 0; err_unmap_io_base: - pcim_iounmap(pdev, hdev->hw.io_base); + pcim_iounmap(pdev, hdev->hw.hw.io_base); err_clr_master: pci_clear_master(pdev); pci_release_regions(pdev); @@ -11514,10 +11090,10 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - if (hdev->hw.mem_base) - devm_iounmap(&pdev->dev, hdev->hw.mem_base); + if (hdev->hw.hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); - pcim_iounmap(pdev, hdev->hw.io_base); + pcim_iounmap(pdev, hdev->hw.hw.io_base); pci_free_irq_vectors(pdev); pci_clear_master(pdev); pci_release_mem_regions(pdev); @@ -11556,29 +11132,25 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, int retry_cnt = 0; int ret; -retry: - down(&hdev->reset_sem); - set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - hdev->reset_type = rst_type; - ret = hclge_reset_prepare(hdev); - if (ret || hdev->reset_pending) { - dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", - ret); - if (hdev->reset_pending || - retry_cnt++ < 
HCLGE_RESET_RETRY_CNT) { - dev_err(&hdev->pdev->dev, - "reset_pending:0x%lx, retry_cnt:%d\n", - hdev->reset_pending, retry_cnt); - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - up(&hdev->reset_sem); - msleep(HCLGE_RESET_RETRY_WAIT_MS); - goto retry; - } + while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclge_reset_prepare(hdev); + if (!ret && !hdev->reset_pending) + break; + + dev_err(&hdev->pdev->dev, + "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", + ret, hdev->reset_pending, retry_cnt); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + msleep(HCLGE_RESET_RETRY_WAIT_MS); } /* disable misc vector before reset done */ hclge_enable_vector(&hdev->misc_vector, false); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); if (hdev->reset_type == HNAE3_FLR_RESET) hdev->rst_stats.flr_rst_cnt++; @@ -11683,12 +11255,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_pci_uninit; /* Firmware command queue initialize */ - ret = hclge_cmd_queue_init(hdev); + ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) goto err_devlink_uninit; /* Firmware command initialize */ - ret = hclge_cmd_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, + true, hdev->reset_pending); if (ret) goto err_cmd_uninit; @@ -11776,7 +11349,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } - ret = hclge_rss_init_cfg(hdev); + ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, + &hdev->rss_cfg); if (ret) { dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); goto err_mdiobus_unreg; @@ -11861,11 +11435,11 @@ err_msi_irq_uninit: err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: - hclge_cmd_uninit(hdev); + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); err_devlink_uninit: hclge_devlink_uninit(hdev); err_pci_uninit: - pcim_iounmap(pdev, hdev->hw.io_base); + pcim_iounmap(pdev, hdev->hw.hw.io_base); pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); @@ -12112,7 +11686,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_reset_umv_space(hdev); } - ret = hclge_cmd_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, + true, hdev->reset_pending); if (ret) { dev_err(&pdev->dev, "Cmd queue init failed\n"); return ret; @@ -12252,7 +11827,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_config_nic_hw_error(hdev, false); hclge_config_rocee_ras_interrupt(hdev, false); - hclge_cmd_uninit(hdev); + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); hclge_misc_irq_uninit(hdev); hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); @@ -12288,19 +11863,43 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, *max_rss_size = hdev->pf_rss_size_max; } +static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + struct hclge_dev *hdev = vport->back; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 roundup_size; + unsigned int i; + + roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); + roundup_size = ilog2(roundup_size); + /* Set the RSS TC mode according to the new RSS size */ + for (i = 0; i < HCLGE_MAX_TC_NUM; 
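[Editor's note] The reset-prepare change above rewrites an open-coded "retry:" label and goto into a bounded while loop that exits early once preparation succeeds. A compact userspace model of that control-flow change; prepare() is a stub for hclge_reset_prepare(), and the constants are stand-ins:

#include <stdio.h>
#include <unistd.h>

#define RESET_RETRY_CNT		5	/* stand-in for HCLGE_RESET_RETRY_CNT */
#define RESET_RETRY_WAIT_MS	100

/* stub for hclge_reset_prepare(): fails once, then succeeds */
static int prepare(void)
{
	static int calls;

	return ++calls >= 2 ? 0 : -1;
}

int main(void)
{
	int retry_cnt = 0;
	int ret = -1;

	while (retry_cnt++ < RESET_RETRY_CNT) {
		ret = prepare();
		if (!ret)
			break;	/* prepared successfully, stop retrying */

		fprintf(stderr, "failed to prepare, retry_cnt:%d\n", retry_cnt);
		usleep(RESET_RETRY_WAIT_MS * 1000);
	}

	printf("ret = %d\n", ret);
	return 0;
}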
i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = vport->nic.kinfo.rss_size * i; + } + + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, + tc_size); +} + static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, bool rxfh_configured) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hclge_vport *vport = hclge_get_vport(handle); struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; - u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; struct hclge_dev *hdev = vport->back; - u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; u16 cur_rss_size = kinfo->rss_size; u16 cur_tqps = kinfo->num_tqps; - u16 tc_valid[HCLGE_MAX_TC_NUM]; - u16 roundup_size; u32 *rss_indir; unsigned int i; int ret; @@ -12313,20 +11912,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, return ret; } - roundup_size = roundup_pow_of_two(kinfo->rss_size); - roundup_size = ilog2(roundup_size); - /* Set the RSS TC mode according to the new RSS size */ - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - tc_valid[i] = 0; - - if (!(hdev->hw_tc_map & BIT(i))) - continue; - - tc_valid[i] = 1; - tc_size[i] = roundup_size; - tc_offset[i] = kinfo->rss_size * i; - } - ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + ret = hclge_set_rss_tc_mode_cfg(handle); if (ret) return ret; @@ -12508,7 +12094,7 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) { hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true); - desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); } /* initialize the last command BD */ @@ -12552,7 +12138,7 @@ static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, hclge_cmd_setup_basic_desc(desc, cmd, true); for (i = 0; i < bd_num - 1; i++) { - desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); desc++; hclge_cmd_setup_basic_desc(desc, cmd, true); } @@ -12985,7 +12571,7 @@ static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, /* bd0~bd4 need next flag */ if (i < HCLGE_SFP_INFO_CMD_NUM - 1) - desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); } /* setup bd0, this bd contains offset and read length. 
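[Editor's note] hclge_set_rss_tc_mode_cfg() above rounds the per-TC RSS size up to a power of two and stores its base-2 logarithm, with each enabled TC offset by rss_size * tc. A worked standalone example of that arithmetic (rss_size = 6 rounds up to 8, so the size is stored as 3); the map and size values are examples only:

#include <stdio.h>

#define MAX_TC_NUM 8

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int rss_size = 6;	/* queues per TC, example value */
	unsigned int hw_tc_map = 0x05;	/* TCs 0 and 2 enabled, example */
	unsigned int log_size = ilog2(roundup_pow_of_two(rss_size));
	unsigned int i;

	for (i = 0; i < MAX_TC_NUM; i++) {
		if (!(hw_tc_map & (1U << i)))
			continue;	/* tc_valid[i] stays 0 */
		printf("tc%u: size=2^%u offset=%u\n", i, log_size,
		       rss_size * i);
	}
	return 0;
}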
*/ @@ -13095,7 +12681,7 @@ static const struct hnae3_ae_ops hclge_ops = { .check_port_speed = hclge_check_port_speed, .get_fec = hclge_get_fec, .set_fec = hclge_set_fec, - .get_rss_key_size = hclge_get_rss_key_size, + .get_rss_key_size = hclge_comm_get_rss_key_size, .get_rss = hclge_get_rss, .set_rss = hclge_set_rss, .set_rss_tuple = hclge_set_rss_tuple, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index ebba603483a0..adfb26e79262 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -13,6 +13,8 @@ #include "hclge_cmd.h" #include "hclge_ptp.h" #include "hnae3.h" +#include "hclge_comm_rss.h" +#include "hclge_comm_tqp_stats.h" #define HCLGE_MOD_VERSION "1.0" #define HCLGE_DRIVER_NAME "hclge" @@ -38,20 +40,7 @@ #define HCLGE_VECTOR_REG_OFFSET_H 0x1000 #define HCLGE_VECTOR_VF_OFFSET 0x100000 -#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 #define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C -#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 - -#define HCLGE_CMDQ_INTR_STS_REG 0x27104 -#define HCLGE_CMDQ_INTR_EN_REG 0x27108 -#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C /* bar registers for common func */ #define HCLGE_GRO_EN_REG 0x28000 @@ -93,22 +82,6 @@ #define HCLGE_TQP_INTR_RL_REG 0x20900 #define HCLGE_RSS_IND_TBL_SIZE 512 -#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0) -#define HCLGE_RSS_KEY_SIZE 40 -#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0 -#define HCLGE_RSS_HASH_ALGO_SIMPLE 1 -#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2 -#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0) - -#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) -#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) -#define HCLGE_D_PORT_BIT BIT(0) -#define HCLGE_S_PORT_BIT BIT(1) -#define HCLGE_D_IP_BIT BIT(2) -#define HCLGE_S_IP_BIT BIT(3) -#define HCLGE_V_TAG_BIT BIT(4) -#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \ - (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT) #define HCLGE_RSS_TC_SIZE_0 1 #define HCLGE_RSS_TC_SIZE_1 2 @@ -228,7 +201,6 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_MBX_HANDLING, HCLGE_STATE_ERR_SERVICE_SCHED, HCLGE_STATE_STATISTICS_UPDATING, - HCLGE_STATE_CMD_DISABLE, HCLGE_STATE_LINK_UPDATING, HCLGE_STATE_RST_FAIL, HCLGE_STATE_FD_TBL_CHANGED, @@ -294,31 +266,9 @@ struct hclge_mac { }; struct hclge_hw { - void __iomem *io_base; - void __iomem *mem_base; + struct hclge_comm_hw hw; struct hclge_mac mac; int num_vec; - struct hclge_cmq cmq; -}; - -/* TQP stats */ -struct hlcge_tqp_stats { - /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ - u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ - /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ - u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ -}; - -struct hclge_tqp { - /* copy of device pointer from pci_dev, - * used when perform DMA mapping - */ - struct device *dev; - struct hnae3_queue q; - struct hlcge_tqp_stats tqp_stats; - u16 index; /* Global index in a NIC controller */ - - bool alloced; }; enum hclge_fc_mode { @@ -641,6 +591,11 @@ struct key_info { #define MAX_FD_FILTER_NUM 4096 #define HCLGE_ARFS_EXPIRE_INTERVAL 5UL +#define hclge_read_dev(a, reg) \ + hclge_comm_read_reg((a)->hw.io_base, reg) +#define hclge_write_dev(a, reg, value) \ + 
hclge_comm_write_reg((a)->hw.io_base, reg, value) + enum HCLGE_FD_ACTIVE_RULE_TYPE { HCLGE_FD_RULE_NONE, HCLGE_FD_ARFS_ACTIVE, @@ -920,7 +875,7 @@ struct hclge_dev { bool cur_promisc; int num_alloc_vfs; /* Actual number of VFs allocated */ - struct hclge_tqp *htqp; + struct hclge_comm_tqp *htqp; struct hclge_vport *vport; struct dentry *hclge_dbgfs; @@ -955,6 +910,8 @@ struct hclge_dev { u16 hclge_fd_rule_num; unsigned long serv_processed_cnt; unsigned long last_serv_processed; + unsigned long last_rst_scheduled; + unsigned long last_mbx_scheduled; unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; u8 fd_en; @@ -977,6 +934,7 @@ struct hclge_dev { cpumask_t affinity_mask; struct hclge_ptp *ptp; struct devlink *devlink; + struct hclge_comm_rss_cfg rss_cfg; }; /* VPort level vlan tag configuration for TX direction */ @@ -1003,17 +961,6 @@ struct hclge_rx_vtag_cfg { bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */ }; -struct hclge_rss_tuple_cfg { - u8 ipv4_tcp_en; - u8 ipv4_udp_en; - u8 ipv4_sctp_en; - u8 ipv4_fragment_en; - u8 ipv6_tcp_en; - u8 ipv6_udp_en; - u8 ipv6_sctp_en; - u8 ipv6_fragment_en; -}; - enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_ALIVE, HCLGE_VPORT_STATE_MAC_TBL_CHANGE, @@ -1047,15 +994,6 @@ struct hclge_vf_info { struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ - u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ - /* User configured lookup table entries */ - u16 *rss_indirection_tbl; - int rss_algo; /* User configured hash algorithm */ - /* User configured rss tuple sets */ - struct hclge_rss_tuple_cfg rss_tuple_sets; - - u16 alloc_rss_size; - u16 qs_offset; u32 bw_limit; /* VSI BW Limit (0 = disabled) */ u8 dwrr; @@ -1093,6 +1031,11 @@ struct hclge_speed_bit_map { u32 speed_bit; }; +struct hclge_mac_speed_map { + u32 speed_drv; /* speed defined in driver */ + u32 speed_fw; /* speed defined in firmware */ +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, @@ -1111,7 +1054,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, static inline int hclge_get_queue_id(struct hnae3_queue *queue) { - struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q); + struct hclge_comm_tqp *tqp = + container_of(queue, struct hclge_comm_tqp, q); return tqp->index; } @@ -1129,7 +1073,6 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable); int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); -void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); int hclge_reset_tqp(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 65d78ee4d65a..6799d16de34b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -4,6 +4,7 @@ #include "hclge_main.h" #include "hclge_mbx.h" #include "hnae3.h" +#include "hclge_comm_rss.h" #define CREATE_TRACE_POINTS #include "hclge_trace.h" @@ -33,7 +34,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, { struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; struct hclge_dev *hdev = vport->back; - enum hclge_cmd_status status; + enum hclge_comm_cmd_status status; struct hclge_desc desc; u16 resp; @@ -90,7 +91,7 @@ static int 
hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, { struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf; struct hclge_dev *hdev = vport->back; - enum hclge_cmd_status status; + enum hclge_comm_cmd_status status; struct hclge_desc desc; resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; @@ -181,7 +182,7 @@ static int hclge_get_ring_chain_from_mbx( if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n", req->msg.param[i].tqp_index, - vport->nic.kinfo.rss_size - 1); + vport->nic.kinfo.rss_size - 1U); return -EINVAL; } } @@ -612,15 +613,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport, { #define HCLGE_RSS_MBX_RESP_LEN 8 struct hclge_dev *hdev = vport->back; + struct hclge_comm_rss_cfg *rss_cfg; u8 index; index = mbx_req->msg.data[0]; + rss_cfg = &hdev->rss_cfg; /* Check the query index of rss_hash_key from VF, make sure no * more than the size of rss_hash_key. */ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > - sizeof(vport[0].rss_hash_key)) { + sizeof(rss_cfg->rss_hash_key)) { dev_warn(&hdev->pdev->dev, "failed to get the rss hash key, the index(%u) invalid !\n", index); @@ -628,7 +631,7 @@ static void hclge_get_rss_key(struct hclge_vport *vport, } memcpy(resp_msg->data, - &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], + &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], HCLGE_RSS_MBX_RESP_LEN); resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; } @@ -661,9 +664,9 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev, static bool hclge_cmd_crq_empty(struct hclge_hw *hw) { - u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); + u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG); - return tail == hw->cmq.crq.next_to_use; + return tail == hw->hw.cmq.crq.next_to_use; } static void hclge_handle_ncsi_error(struct hclge_dev *hdev) @@ -694,7 +697,7 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport, void hclge_mbx_handler(struct hclge_dev *hdev) { - struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; + struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq; struct hclge_respond_to_vf_msg resp_msg; struct hclge_mbx_vf_to_pf_cmd *req; struct hclge_vport *vport; @@ -705,7 +708,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev) /* handle all the mailbox requests in the queue */ while (!hclge_cmd_crq_empty(&hdev->hw)) { - if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { dev_warn(&hdev->pdev->dev, "command queue needs re-initializing\n"); return; @@ -848,6 +852,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev) if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) { resp_msg.status = ret; + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "resp vport%u mbx(%u,%u) late\n", + req->mbx_src_vfid, + req->msg.code, + req->msg.subcode); + hclge_gen_resp_to_vf(vport, req, &resp_msg); } @@ -859,5 +871,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev) } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ - hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use); + hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, + crq->next_to_use); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 1231c34f0949..63d2be4349e3 100644 --- 
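[Editor's note] hclge_get_rss_key() in the mailbox hunk above hands the 40-byte RSS hash key to the VF in 8-byte chunks and bounds-checks the requested chunk index against the key size (now taken from the shared rss_cfg rather than vport[0]). A self-contained model of that chunked, validated copy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RSS_KEY_SIZE	40	/* matches HCLGE_RSS_KEY_SIZE */
#define MBX_RESP_LEN	8	/* matches HCLGE_RSS_MBX_RESP_LEN */

static uint8_t rss_hash_key[RSS_KEY_SIZE];

/* copy one 8-byte chunk of the key, rejecting out-of-range indexes */
static int get_rss_key_chunk(unsigned int index, uint8_t *resp)
{
	if ((index + 1) * MBX_RESP_LEN > sizeof(rss_hash_key)) {
		fprintf(stderr, "rss key index %u is invalid\n", index);
		return -1;
	}

	memcpy(resp, &rss_hash_key[index * MBX_RESP_LEN], MBX_RESP_LEN);
	return 0;
}

int main(void)
{
	uint8_t resp[MBX_RESP_LEN];
	unsigned int i;

	for (i = 0; i < RSS_KEY_SIZE / MBX_RESP_LEN; i++)
		if (!get_rss_key_chunk(i, resp))
			printf("chunk %u copied\n", i);

	get_rss_key_chunk(5, resp);	/* bytes 40..47: rejected */
	return 0;
}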
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -47,7 +47,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, struct hclge_desc desc; int ret; - if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) return 0; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); @@ -85,7 +85,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) struct hclge_desc desc; int ret; - if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) return 0; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index fd0e20190b90..4200d0b6d931 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -4,6 +4,10 @@ #ifndef __HCLGE_MDIO_H #define __HCLGE_MDIO_H +#include "hnae3.h" + +struct hclge_dev; + int hclge_mac_mdio_config(struct hclge_dev *hdev); int hclge_mac_connect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index befa9bcc2f2f..a40b1583f114 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -464,7 +464,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) } spin_lock_init(&ptp->lock); - ptp->io_base = hdev->hw.io_base + HCLGE_PTP_REG_OFFSET; + ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; hdev->ptp = ptp; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index 7a9b77de632a..bbee74cd8404 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -8,6 +8,9 @@ #include <linux/net_tstamp.h> #include <linux/types.h> +struct hclge_dev; +struct ifreq; + #define HCLGE_PTP_REG_OFFSET 0x29000 #define HCLGE_PTP_TX_TS_SEQID_REG 0x0 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 429652a8cde1..089f4444b7e3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -678,8 +678,8 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) hclge_tm_update_kinfo_rss_size(vport); kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ - vport->alloc_rss_size = kinfo->rss_size; vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + hdev->rss_cfg.rss_size = kinfo->rss_size; /* when enable mqprio, the tc_info has been updated. 
*/ if (kinfo->tc_info.mqprio_active) @@ -916,38 +916,63 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, return 0; } -static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) +static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; + u16 i, k; int ret; - u32 i, k; - if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { - /* Cfg qs -> pri mapping, one by one mapping */ - for (k = 0; k < hdev->num_alloc_vport; k++) { - struct hnae3_knic_private_info *kinfo = - &vport[k].nic.kinfo; - - for (i = 0; i < kinfo->tc_info.num_tc; i++) { - ret = hclge_tm_qs_to_pri_map_cfg( - hdev, vport[k].qs_offset + i, i); - if (ret) - return ret; - } + /* Cfg qs -> pri mapping, one by one mapping */ + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + for (i = 0; i < kinfo->tc_info.num_tc; i++) { + ret = hclge_tm_qs_to_pri_map_cfg(hdev, + vport[k].qs_offset + i, + i); + if (ret) + return ret; } - } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { - /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ - for (k = 0; k < hdev->num_alloc_vport; k++) - for (i = 0; i < HNAE3_MAX_TC; i++) { - ret = hclge_tm_qs_to_pri_map_cfg( - hdev, vport[k].qs_offset + i, k); - if (ret) - return ret; - } - } else { - return -EINVAL; } + return 0; +} + +static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, k; + int ret; + + /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ + for (k = 0; k < hdev->num_alloc_vport; k++) + for (i = 0; i < HNAE3_MAX_TC; i++) { + ret = hclge_tm_qs_to_pri_map_cfg(hdev, + vport[k].qs_offset + i, + k); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) + ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev); + else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev); + else + return -EINVAL; + + if (ret) + return ret; + /* Cfg q -> qs mapping */ for (i = 0; i < hdev->num_alloc_vport; i++) { ret = hclge_vport_q_to_qs_map(hdev, vport); @@ -1274,6 +1299,27 @@ static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev) return 0; } +static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u16 i; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); + if (ret) + return ret; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_qs_schd_mode_cfg(hdev, + vport[i].qs_offset + pri_id, + HCLGE_SCH_MODE_DWRR); + if (ret) + return ret; + } + + return 0; +} + static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; @@ -1304,21 +1350,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; int ret; - u8 i, k; + u8 i; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_tm_pri_schd_mode_cfg(hdev, i); + ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); if (ret) return ret; - - for (k = 0; k < hdev->num_alloc_vport; k++) { - ret = hclge_tm_qs_schd_mode_cfg( - hdev, vport[k].qs_offset + i, - HCLGE_SCH_MODE_DWRR); - if (ret) - return ret; - } } } else { for (i = 0; i < hdev->num_alloc_vport; i++) { diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 1db7f40b4525..619cc30a2dfc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -6,6 +6,12 @@ #include <linux/types.h> +#include "hnae3.h" + +struct hclge_dev; +struct hclge_vport; +enum hclge_opcode_type; + /* MAC Pause */ #define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0) #define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile deleted file mode 100644 index 51ff7d86ee90..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0+ -# -# Makefile for the HISILICON network device drivers. -# - -ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 -ccflags-y += -I $(srctree)/$(src) - -obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o -hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c deleted file mode 100644 index e605c2c5bcce..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ /dev/null @@ -1,556 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -// Copyright (c) 2016-2017 Hisilicon Limited. - -#include <linux/device.h> -#include <linux/dma-direction.h> -#include <linux/dma-mapping.h> -#include <linux/err.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include "hclgevf_cmd.h" -#include "hclgevf_main.h" -#include "hnae3.h" - -#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) - -static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) -{ - int ntc = ring->next_to_clean; - int ntu = ring->next_to_use; - int used; - - used = (ntu - ntc + ring->desc_num) % ring->desc_num; - - return ring->desc_num - used - 1; -} - -static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring, - int head) -{ - int ntu = ring->next_to_use; - int ntc = ring->next_to_clean; - - if (ntu > ntc) - return head >= ntc && head <= ntu; - - return head >= ntc || head <= ntu; -} - -static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) -{ - struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw); - struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - int clean; - u32 head; - - head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - rmb(); /* Make sure head is ready before touch any data */ - - if (!hclgevf_is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, - csq->next_to_use, csq->next_to_clean); - dev_warn(&hdev->pdev->dev, - "Disabling any further commands to IMP firmware\n"); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - return -EIO; - } - - clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; - csq->next_to_clean = head; - return clean; -} - -static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) -{ - u32 head; - - head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - - return head == hw->cmq.csq.next_to_use; -} - -static bool hclgevf_is_special_opcode(u16 opcode) -{ - const u16 spec_opcode[] = {0x30, 0x31, 0x32}; - int i; - - for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { - if (spec_opcode[i] == opcode) - return true; - } - - return false; -} - -static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) -{ - struct hclgevf_dev *hdev = ring->dev; - struct 
hclgevf_hw *hw = &hdev->hw; - u32 reg_val; - - if (ring->flag == HCLGEVF_TYPE_CSQ) { - reg_val = lower_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); - reg_val = upper_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); - - reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG); - reg_val &= HCLGEVF_NIC_SW_RST_RDY; - reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); - } else { - reg_val = lower_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); - reg_val = upper_32_bits(ring->desc_dma_addr); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); - - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); - } -} - -static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw) -{ - hclgevf_cmd_config_regs(&hw->cmq.csq); - hclgevf_cmd_config_regs(&hw->cmq.crq); -} - -static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) -{ - int size = ring->desc_num * sizeof(struct hclgevf_desc); - - ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size, - &ring->desc_dma_addr, GFP_KERNEL); - if (!ring->desc) - return -ENOMEM; - - return 0; -} - -static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) -{ - int size = ring->desc_num * sizeof(struct hclgevf_desc); - - if (ring->desc) { - dma_free_coherent(cmq_ring_to_dev(ring), size, - ring->desc, ring->desc_dma_addr); - ring->desc = NULL; - } -} - -static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type) -{ - struct hclgevf_hw *hw = &hdev->hw; - struct hclgevf_cmq_ring *ring = - (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; - int ret; - - ring->dev = hdev; - ring->flag = ring_type; - - /* allocate CSQ/CRQ descriptor */ - ret = hclgevf_alloc_cmd_desc(ring); - if (ret) - dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret, - (ring_type == HCLGEVF_TYPE_CSQ) ? 
"CSQ" : "CRQ"); - - return ret; -} - -void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, - enum hclgevf_opcode_type opcode, bool is_read) -{ - memset(desc, 0, sizeof(struct hclgevf_desc)); - desc->opcode = cpu_to_le16(opcode); - desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR | - HCLGEVF_CMD_FLAG_IN); - if (is_read) - desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR); - else - desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR); -} - -struct vf_errcode { - u32 imp_errcode; - int common_errno; -}; - -static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw, - struct hclgevf_desc *desc, int num) -{ - struct hclgevf_desc *desc_to_use; - int handle = 0; - - while (handle < num) { - desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; - *desc_to_use = desc[handle]; - (hw->cmq.csq.next_to_use)++; - if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) - hw->cmq.csq.next_to_use = 0; - handle++; - } -} - -static int hclgevf_cmd_convert_err_code(u16 desc_ret) -{ - struct vf_errcode hclgevf_cmd_errcode[] = { - {HCLGEVF_CMD_EXEC_SUCCESS, 0}, - {HCLGEVF_CMD_NO_AUTH, -EPERM}, - {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP}, - {HCLGEVF_CMD_QUEUE_FULL, -EXFULL}, - {HCLGEVF_CMD_NEXT_ERR, -ENOSR}, - {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK}, - {HCLGEVF_CMD_PARA_ERR, -EINVAL}, - {HCLGEVF_CMD_RESULT_ERR, -ERANGE}, - {HCLGEVF_CMD_TIMEOUT, -ETIME}, - {HCLGEVF_CMD_HILINK_ERR, -ENOLINK}, - {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO}, - {HCLGEVF_CMD_INVALID, -EBADR}, - }; - u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode); - u32 i; - - for (i = 0; i < errcode_count; i++) - if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret) - return hclgevf_cmd_errcode[i].common_errno; - - return -EIO; -} - -static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw, - struct hclgevf_desc *desc, int num, int ntc) -{ - u16 opcode, desc_ret; - int handle; - - opcode = le16_to_cpu(desc[0].opcode); - for (handle = 0; handle < num; handle++) { - /* Get the result of hardware write back */ - desc[handle] = hw->cmq.csq.desc[ntc]; - ntc++; - if (ntc == hw->cmq.csq.desc_num) - ntc = 0; - } - if (likely(!hclgevf_is_special_opcode(opcode))) - desc_ret = le16_to_cpu(desc[num - 1].retval); - else - desc_ret = le16_to_cpu(desc[0].retval); - hw->cmq.last_status = desc_ret; - - return hclgevf_cmd_convert_err_code(desc_ret); -} - -static int hclgevf_cmd_check_result(struct hclgevf_hw *hw, - struct hclgevf_desc *desc, int num, int ntc) -{ - struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev; - bool is_completed = false; - u32 timeout = 0; - int handle, ret; - - /* If the command is sync, wait for the firmware to write back, - * if multi descriptors to be sent, use the first one to check - */ - if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) { - do { - if (hclgevf_cmd_csq_done(hw)) { - is_completed = true; - break; - } - udelay(1); - timeout++; - } while (timeout < hw->cmq.tx_timeout); - } - - if (!is_completed) - ret = -EBADE; - else - ret = hclgevf_cmd_check_retval(hw, desc, num, ntc); - - /* Clean the command send queue */ - handle = hclgevf_cmd_csq_clean(hw); - if (handle < 0) - ret = handle; - else if (handle != num) - dev_warn(&hdev->pdev->dev, - "cleaned %d, need to clean %d\n", handle, num); - return ret; -} - -/* hclgevf_cmd_send - send command to command queue - * @hw: pointer to the hw struct - * @desc: prefilled descriptor for describing the command - * @num : the number of descriptors to be sent - * - * This is the main send command for command queue, it - * sends the queue, cleans the queue, etc - */ -int hclgevf_cmd_send(struct 
hclgevf_hw *hw, struct hclgevf_desc *desc, int num) -{ - struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev; - struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - int ret; - int ntc; - - spin_lock_bh(&hw->cmq.csq.lock); - - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { - spin_unlock_bh(&hw->cmq.csq.lock); - return -EBUSY; - } - - if (num > hclgevf_ring_space(&hw->cmq.csq)) { - /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, - * need update the SW HEAD pointer csq->next_to_clean - */ - csq->next_to_clean = hclgevf_read_dev(hw, - HCLGEVF_NIC_CSQ_HEAD_REG); - spin_unlock_bh(&hw->cmq.csq.lock); - return -EBUSY; - } - - /* Record the location of desc in the ring for this time - * which will be use for hardware to write back - */ - ntc = hw->cmq.csq.next_to_use; - - hclgevf_cmd_copy_desc(hw, desc, num); - - /* Write to hardware */ - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, - hw->cmq.csq.next_to_use); - - ret = hclgevf_cmd_check_result(hw, desc, num, ntc); - - spin_unlock_bh(&hw->cmq.csq.lock); - - return ret; -} - -static void hclgevf_set_default_capability(struct hclgevf_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - - set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); -} - -static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = { - {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, - {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, - {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, - {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, - {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, - {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, -}; - -static void hclgevf_parse_capability(struct hclgevf_dev *hdev, - struct hclgevf_query_version_cmd *cmd) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps, i; - - caps = __le32_to_cpu(cmd->caps[0]); - for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++) - if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit)) - set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit, - ae_dev->caps); -} - -static __le32 hclgevf_build_api_caps(void) -{ - u32 api_caps = 0; - - hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1); - - return cpu_to_le32(api_caps); -} - -static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - struct hclgevf_query_version_cmd *resp; - struct hclgevf_desc desc; - int status; - - resp = (struct hclgevf_query_version_cmd *)desc.data; - - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1); - resp->api_caps = hclgevf_build_api_caps(); - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) - return status; - - hdev->fw_version = le32_to_cpu(resp->firmware); - - ae_dev->dev_version = le32_to_cpu(resp->hardware) << - HNAE3_PCI_REVISION_BIT_SIZE; - ae_dev->dev_version |= hdev->pdev->revision; - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) - hclgevf_set_default_capability(hdev); - - hclgevf_parse_capability(hdev, resp); - - return status; -} - -int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev) -{ - int ret; - - /* Setup the lock for command queue */ - spin_lock_init(&hdev->hw.cmq.csq.lock); - spin_lock_init(&hdev->hw.cmq.crq.lock); - - hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT; - hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - 
hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - - ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ); - if (ret) { - dev_err(&hdev->pdev->dev, - "CSQ ring setup error %d\n", ret); - return ret; - } - - ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ); - if (ret) { - dev_err(&hdev->pdev->dev, - "CRQ ring setup error %d\n", ret); - goto err_csq; - } - - return 0; -err_csq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); - return ret; -} - -static int hclgevf_firmware_compat_config(struct hclgevf_dev *hdev, bool en) -{ - struct hclgevf_firmware_compat_cmd *req; - struct hclgevf_desc desc; - u32 compat = 0; - - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_IMP_COMPAT_CFG, false); - - if (en) { - req = (struct hclgevf_firmware_compat_cmd *)desc.data; - - hnae3_set_bit(compat, HCLGEVF_SYNC_RX_RING_HEAD_EN_B, 1); - - req->compat = cpu_to_le32(compat); - } - - return hclgevf_cmd_send(&hdev->hw, &desc, 1); -} - -int hclgevf_cmd_init(struct hclgevf_dev *hdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - int ret; - - spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock(&hdev->hw.cmq.crq.lock); - - /* initialize the pointers of async rx queue of mailbox */ - hdev->arq.hdev = hdev; - hdev->arq.head = 0; - hdev->arq.tail = 0; - atomic_set(&hdev->arq.count, 0); - hdev->hw.cmq.csq.next_to_clean = 0; - hdev->hw.cmq.csq.next_to_use = 0; - hdev->hw.cmq.crq.next_to_clean = 0; - hdev->hw.cmq.crq.next_to_use = 0; - - hclgevf_cmd_init_regs(&hdev->hw); - - spin_unlock(&hdev->hw.cmq.crq.lock); - spin_unlock_bh(&hdev->hw.cmq.csq.lock); - - clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - - /* Check if there is new reset pending, because the higher level - * reset may happen when lower level reset is being processed. - */ - if (hclgevf_is_reset_pending(hdev)) { - ret = -EBUSY; - goto err_cmd_init; - } - - /* get version and device capabilities */ - ret = hclgevf_cmd_query_version_and_capability(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to query version and capabilities, ret = %d\n", ret); - goto err_cmd_init; - } - - dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, - HNAE3_FW_VERSION_BYTE3_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, - HNAE3_FW_VERSION_BYTE2_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, - HNAE3_FW_VERSION_BYTE1_SHIFT), - hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, - HNAE3_FW_VERSION_BYTE0_SHIFT)); - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { - /* ask the firmware to enable some features, driver can work - * without it. 
- */ - ret = hclgevf_firmware_compat_config(hdev, true); - if (ret) - dev_warn(&hdev->pdev->dev, - "Firmware compatible features not enabled(%d).\n", - ret); - } - - return 0; - -err_cmd_init: - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - - return ret; -} - -static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) -{ - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); -} - -void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) -{ - hclgevf_firmware_compat_config(hdev, false); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - /* wait to ensure that the firmware completes the possible left - * over commands. - */ - msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME); - spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock(&hdev->hw.cmq.crq.lock); - hclgevf_cmd_uninit_regs(&hdev->hw); - spin_unlock(&hdev->hw.cmq.crq.lock); - spin_unlock_bh(&hdev->hw.cmq.csq.lock); - - hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); - hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); -} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index edc9e154061a..537b887fa0a2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -6,9 +6,8 @@ #include <linux/io.h> #include <linux/types.h> #include "hnae3.h" +#include "hclge_comm_cmd.h" -#define HCLGEVF_CMDQ_TX_TIMEOUT 30000 -#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 @@ -16,107 +15,6 @@ struct hclgevf_hw; struct hclgevf_dev; #define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4 -struct hclgevf_firmware_compat_cmd { - __le32 compat; - u8 rsv[20]; -}; - -struct hclgevf_desc { - __le16 opcode; - __le16 flag; - __le16 retval; - __le16 rsv; - __le32 data[6]; -}; - -struct hclgevf_desc_cb { - dma_addr_t dma; - void *va; - u32 length; -}; - -struct hclgevf_cmq_ring { - dma_addr_t desc_dma_addr; - struct hclgevf_desc *desc; - struct hclgevf_desc_cb *desc_cb; - struct hclgevf_dev *dev; - u32 head; - u32 tail; - - u16 buf_size; - u16 desc_num; - int next_to_use; - int next_to_clean; - u8 flag; - spinlock_t lock; /* Command queue lock */ -}; - -enum hclgevf_cmd_return_status { - HCLGEVF_CMD_EXEC_SUCCESS = 0, - HCLGEVF_CMD_NO_AUTH = 1, - HCLGEVF_CMD_NOT_SUPPORTED = 2, - HCLGEVF_CMD_QUEUE_FULL = 3, - HCLGEVF_CMD_NEXT_ERR = 4, - HCLGEVF_CMD_UNEXE_ERR = 5, - HCLGEVF_CMD_PARA_ERR = 6, - HCLGEVF_CMD_RESULT_ERR = 7, - HCLGEVF_CMD_TIMEOUT = 8, - HCLGEVF_CMD_HILINK_ERR = 9, - HCLGEVF_CMD_QUEUE_ILLEGAL = 10, - HCLGEVF_CMD_INVALID = 11, -}; - -enum hclgevf_cmd_status { - HCLGEVF_STATUS_SUCCESS = 0, - HCLGEVF_ERR_CSQ_FULL = -1, - HCLGEVF_ERR_CSQ_TIMEOUT = -2, - HCLGEVF_ERR_CSQ_ERROR = -3 -}; - -struct hclgevf_cmq { - struct hclgevf_cmq_ring csq; - struct hclgevf_cmq_ring crq; - u16 tx_timeout; /* Tx timeout */ - enum hclgevf_cmd_status last_status; -}; - -#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0 -#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1 -#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2 -#define 
HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3 -#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4 -#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5 - -#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT) -#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT) -#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT) -#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT) -#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT) -#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT) - -enum hclgevf_opcode_type { - /* Generic command */ - HCLGEVF_OPC_QUERY_FW_VER = 0x0001, - HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024, - HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050, - - /* TQP command */ - HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03, - HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13, - HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, - /* GRO command */ - HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10, - /* RSS cmd */ - HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, - HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, - HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07, - HCLGEVF_OPC_RSS_TC_MODE = 0x0D08, - /* Mailbox cmd */ - HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001, - - /* IMP stats command */ - HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A, -}; #define HCLGEVF_TQP_REG_OFFSET 0x80000 #define HCLGEVF_TQP_REG_SIZE 0x200 @@ -156,34 +54,6 @@ struct hclgevf_ctrl_vector_chain { u8 resv; }; -enum HCLGEVF_CAP_BITS { - HCLGEVF_CAP_UDP_GSO_B, - HCLGEVF_CAP_QB_B, - HCLGEVF_CAP_FD_FORWARD_TC_B, - HCLGEVF_CAP_PTP_B, - HCLGEVF_CAP_INT_QL_B, - HCLGEVF_CAP_HW_TX_CSUM_B, - HCLGEVF_CAP_TX_PUSH_B, - HCLGEVF_CAP_PHY_IMP_B, - HCLGEVF_CAP_TQP_TXRX_INDEP_B, - HCLGEVF_CAP_HW_PAD_B, - HCLGEVF_CAP_STASH_B, - HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, - HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15, -}; - -enum HCLGEVF_API_CAP_BITS { - HCLGEVF_API_CAP_FLEX_RSS_TBL_B, -}; - -#define HCLGEVF_QUERY_CAP_LENGTH 3 -struct hclgevf_query_version_cmd { - __le32 firmware; - __le32 hardware; - __le32 api_caps; - __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */ -}; - #define HCLGEVF_MSIX_OFT_ROCEE_S 0 #define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S) #define HCLGEVF_VEC_NUM_S 0 @@ -203,50 +73,6 @@ struct hclgevf_cfg_gro_status_cmd { u8 rsv[23]; }; -#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 -#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 -#define HCLGEVF_RSS_HASH_KEY_NUM 16 -struct hclgevf_rss_config_cmd { - u8 hash_config; - u8 rsv[7]; - u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM]; -}; - -struct hclgevf_rss_input_tuple_cmd { - u8 ipv4_tcp_en; - u8 ipv4_udp_en; - u8 ipv4_sctp_en; - u8 ipv4_fragment_en; - u8 ipv6_tcp_en; - u8 ipv6_udp_en; - u8 ipv6_sctp_en; - u8 ipv6_fragment_en; - u8 rsv[16]; -}; - -#define HCLGEVF_RSS_CFG_TBL_SIZE 16 - -struct hclgevf_rss_indirection_table_cmd { - __le16 start_table_index; - __le16 rss_set_bitmap; - u8 rsv[4]; - u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE]; -}; - -#define HCLGEVF_RSS_TC_OFFSET_S 0 -#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0) -#define HCLGEVF_RSS_TC_SIZE_MSB_B 11 -#define HCLGEVF_RSS_TC_SIZE_S 12 -#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12) -#define HCLGEVF_RSS_TC_VALID_B 15 -#define HCLGEVF_MAX_TC_NUM 8 -#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3 - -struct hclgevf_rss_tc_mode_cmd { - __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM]; - u8 rsv[8]; -}; - #define HCLGEVF_LINK_STS_B 0 #define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B) struct hclgevf_link_status_cmd { @@ -273,9 +99,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { u8 rsv[14]; }; -#define HCLGEVF_TYPE_CRQ 0 -#define HCLGEVF_TYPE_CSQ 1 - /* this bit indicates that the driver is ready for 
hardware reset */ #define HCLGEVF_NIC_SW_RST_RDY_B 16 #define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B) @@ -285,6 +108,9 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4 +#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \ + hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read) + struct hclgevf_dev_specs_0_cmd { __le32 rsv0; __le32 mac_entry_num; @@ -305,38 +131,6 @@ struct hclgevf_dev_specs_1_cmd { u8 rsv1[18]; }; -/* capabilities bits map between imp firmware and local driver */ -struct hclgevf_caps_bit_map { - u16 imp_bit; - u16 local_bit; -}; - -static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) -{ - writel(value, base + reg); -} - -static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg) -{ - u8 __iomem *reg_addr = READ_ONCE(base); - - return readl(reg_addr + reg); -} - -#define hclgevf_write_dev(a, reg, value) \ - hclgevf_write_reg((a)->io_base, reg, value) -#define hclgevf_read_dev(a, reg) \ - hclgevf_read_reg((a)->io_base, reg) - -#define HCLGEVF_SEND_SYNC(flag) \ - ((flag) & HCLGEVF_CMD_FLAG_NO_INTR) - -int hclgevf_cmd_init(struct hclgevf_dev *hdev); -void hclgevf_cmd_uninit(struct hclgevf_dev *hdev); -int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev); - -int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num); -void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, - enum hclgevf_opcode_type opcode, - bool is_read); +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num); +void hclgevf_arq_init(struct hclgevf_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 25c419d40066..7df87610ad96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -9,6 +9,7 @@ #include "hclge_mbx.h" #include "hnae3.h" #include "hclgevf_devlink.h" +#include "hclge_comm_rss.h" #define HCLGEVF_NAME "hclgevf" @@ -30,30 +31,22 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { {0, } }; -static const u8 hclgevf_hash_key[] = { - 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, - 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, - 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, - 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, - 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA -}; - MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG, - HCLGEVF_NIC_CSQ_BASEADDR_H_REG, - HCLGEVF_NIC_CSQ_DEPTH_REG, - HCLGEVF_NIC_CSQ_TAIL_REG, - HCLGEVF_NIC_CSQ_HEAD_REG, - HCLGEVF_NIC_CRQ_BASEADDR_L_REG, - HCLGEVF_NIC_CRQ_BASEADDR_H_REG, - HCLGEVF_NIC_CRQ_DEPTH_REG, - HCLGEVF_NIC_CRQ_TAIL_REG, - HCLGEVF_NIC_CRQ_HEAD_REG, - HCLGEVF_VECTOR0_CMDQ_SRC_REG, - HCLGEVF_VECTOR0_CMDQ_STATE_REG, - HCLGEVF_CMDQ_INTR_EN_REG, - HCLGEVF_CMDQ_INTR_GEN_REG}; +static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CSQ_DEPTH_REG, + HCLGE_COMM_NIC_CSQ_TAIL_REG, + HCLGE_COMM_NIC_CSQ_HEAD_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, + HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, + HCLGE_COMM_NIC_CRQ_DEPTH_REG, + HCLGE_COMM_NIC_CRQ_TAIL_REG, + HCLGE_COMM_NIC_CRQ_HEAD_REG, + HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, + HCLGE_COMM_VECTOR0_CMDQ_STATE_REG, + HCLGE_COMM_CMDQ_INTR_EN_REG, + HCLGE_COMM_CMDQ_INTR_GEN_REG}; static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, HCLGEVF_RST_ING, 
@@ -92,109 +85,40 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, HCLGEVF_TQP_INTR_GL2_REG, HCLGEVF_TQP_INTR_RL_REG}; -static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) +/* hclgevf_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + */ +int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) { - if (!handle->client) - return container_of(handle, struct hclgevf_dev, nic); - else if (handle->client->type == HNAE3_CLIENT_ROCE) - return container_of(handle, struct hclgevf_dev, roce); - else - return container_of(handle, struct hclgevf_dev, nic); + return hclge_comm_cmd_send(&hw->hw, desc, num); } -static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) +void hclgevf_arq_init(struct hclgevf_dev *hdev) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_desc desc; - struct hclgevf_tqp *tqp; - int status; - int i; + struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; - for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_QUERY_RX_STATUS, - true); - - desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "Query tqp stat fail, status = %d,queue = %d\n", - status, i); - return status; - } - tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += - le32_to_cpu(desc.data[1]); - - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, - true); - - desc.data[0] = cpu_to_le32(tqp->index & 0x1ff); - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "Query tqp stat fail, status = %d,queue = %d\n", - status, i); - return status; - } - tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += - le32_to_cpu(desc.data[1]); - } - - return 0; + spin_lock(&cmdq->crq.lock); + /* initialize the pointers of async rx queue of mailbox */ + hdev->arq.hdev = hdev; + hdev->arq.head = 0; + hdev->arq.tail = 0; + atomic_set(&hdev->arq.count, 0); + spin_unlock(&cmdq->crq.lock); } -static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclgevf_tqp *tqp; - u64 *buff = data; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); - *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; - } - for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); - *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; - } - - return buff; -} - -static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - - return kinfo->num_tqps * 2; -} - -static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) +static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - u8 *buff = data; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", - tqp->index); - buff += 
ETH_GSTRING_LEN; - } - - for (i = 0; i < kinfo->num_tqps; i++) { - struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i], - struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", - tqp->index); - buff += ETH_GSTRING_LEN; - } - - return buff; + if (!handle->client) + return container_of(handle, struct hclgevf_dev, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclgevf_dev, roce); + else + return container_of(handle, struct hclgevf_dev, nic); } static void hclgevf_update_stats(struct hnae3_handle *handle, @@ -203,7 +127,7 @@ static void hclgevf_update_stats(struct hnae3_handle *handle, struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int status; - status = hclgevf_tqps_update_stats(handle); + status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); if (status) dev_err(&hdev->pdev->dev, "VF update of TQPS stats fail, status = %d.\n", @@ -215,7 +139,7 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) if (strset == ETH_SS_TEST) return -EOPNOTSUPP; else if (strset == ETH_SS_STATS) - return hclgevf_tqps_get_sset_count(handle, strset); + return hclge_comm_tqps_get_sset_count(handle); return 0; } @@ -226,12 +150,12 @@ static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, u8 *p = (char *)data; if (strset == ETH_SS_STATS) - p = hclgevf_tqps_get_strings(handle, p); + p = hclge_comm_tqps_get_strings(handle, p); } static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) { - hclgevf_tqps_get_stats(handle, data); + hclge_comm_tqps_get_stats(handle, data); } static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, @@ -397,11 +321,11 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) { - struct hclgevf_tqp *tqp; + struct hclge_comm_tqp *tqp; int i; hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, - sizeof(struct hclgevf_tqp), GFP_KERNEL); + sizeof(struct hclge_comm_tqp), GFP_KERNEL); if (!hdev->htqp) return -ENOMEM; @@ -420,11 +344,11 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 
*/ if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) - tqp->q.io_base = hdev->hw.io_base + + tqp->q.io_base = hdev->hw.hw.io_base + HCLGEVF_TQP_REG_OFFSET + i * HCLGEVF_TQP_REG_SIZE; else - tqp->q.io_base = hdev->hw.io_base + + tqp->q.io_base = hdev->hw.hw.io_base + HCLGEVF_TQP_REG_OFFSET + HCLGEVF_TQP_EXT_REG_OFFSET + (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * @@ -448,7 +372,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) kinfo->num_tx_desc = hdev->num_tx_desc; kinfo->num_rx_desc = hdev->num_rx_desc; kinfo->rx_buf_len = hdev->rx_buf_len; - for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) + for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) if (hdev->hw_tc_map & BIT(i)) num_tc++; @@ -539,7 +463,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; nic->flags |= HNAE3_SUPPORT_VF; - nic->kinfo.io_base = hdev->hw.io_base; + nic->kinfo.io_base = hdev->hw.hw.io_base; ret = hclgevf_knic_setup(hdev); if (ret) @@ -576,7 +500,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { vector->vector = pci_irq_vector(hdev->pdev, i); - vector->io_addr = hdev->hw.io_base + + vector->io_addr = hdev->hw.hw.io_base + HCLGEVF_VECTOR_REG_BASE + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; hdev->vector_status[i] = 0; @@ -606,137 +530,11 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) return -EINVAL; } -static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, - const u8 hfunc, const u8 *key) -{ - struct hclgevf_rss_config_cmd *req; - unsigned int key_offset = 0; - struct hclgevf_desc desc; - int key_counts; - int key_size; - int ret; - - key_counts = HCLGEVF_RSS_KEY_SIZE; - req = (struct hclgevf_rss_config_cmd *)desc.data; - - while (key_counts) { - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_RSS_GENERIC_CONFIG, - false); - - req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); - req->hash_config |= - (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); - - key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts); - memcpy(req->hash_key, - key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); - - key_counts -= key_size; - key_offset++; - ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Configure RSS config fail, status = %d\n", - ret); - return ret; - } - } - - return 0; -} - -static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) -{ - return HCLGEVF_RSS_KEY_SIZE; -} - -static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev) -{ - const u8 *indir = hdev->rss_cfg.rss_indirection_tbl; - struct hclgevf_rss_indirection_table_cmd *req; - struct hclgevf_desc desc; - int rss_cfg_tbl_num; - int status; - int i, j; - - req = (struct hclgevf_rss_indirection_table_cmd *)desc.data; - rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size / - HCLGEVF_RSS_CFG_TBL_SIZE; - - for (i = 0; i < rss_cfg_tbl_num; i++) { - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE, - false); - req->start_table_index = - cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE); - req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK); - for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++) - req->rss_result[j] = - indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j]; - - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "VF failed(=%d) to set RSS indirection table\n", - status); - return status; - } - } - - return 0; -} - 
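The removed hclgevf_set_rss_indir_table() above walks the RSS indirection table in fixed 16-entry chunks, one command descriptor per chunk, and the removed init code further down seeds the table round-robin across the active queues. A standalone sketch of that arithmetic, assuming a 512-entry table and 8 queues (both values are illustrative, not taken from the patch):

#include <stdio.h>

#define RSS_CFG_TBL_SIZE  16   /* entries carried per command descriptor */
#define RSS_IND_TBL_SIZE  512  /* assumed rss_ind_tbl_size */
#define RSS_QUEUE_NUM     8    /* assumed rss_size */

int main(void)
{
	unsigned char indir[RSS_IND_TBL_SIZE];
	int i, j;

	/* round-robin default, mirroring indir[i] = i % rss_size */
	for (i = 0; i < RSS_IND_TBL_SIZE; i++)
		indir[i] = i % RSS_QUEUE_NUM;

	/* one descriptor per 16-entry chunk, start_table_index = i * 16 */
	for (i = 0; i < RSS_IND_TBL_SIZE / RSS_CFG_TBL_SIZE; i++) {
		printf("desc %2d start %3d:", i, i * RSS_CFG_TBL_SIZE);
		for (j = 0; j < RSS_CFG_TBL_SIZE; j++)
			printf(" %u", indir[i * RSS_CFG_TBL_SIZE + j]);
		printf("\n");
	}
	return 0;
}

The same chunking idea drives the removed hash-key programming as well: the key is pushed 16 bytes at a time, with the chunk index carried in the hash_config offset bits.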
-static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) -{ - struct hclgevf_rss_tc_mode_cmd *req; - u16 tc_offset[HCLGEVF_MAX_TC_NUM]; - u16 tc_valid[HCLGEVF_MAX_TC_NUM]; - u16 tc_size[HCLGEVF_MAX_TC_NUM]; - struct hclgevf_desc desc; - u16 roundup_size; - unsigned int i; - int status; - - req = (struct hclgevf_rss_tc_mode_cmd *)desc.data; - - roundup_size = roundup_pow_of_two(rss_size); - roundup_size = ilog2(roundup_size); - - for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - tc_valid[i] = !!(hdev->hw_tc_map & BIT(i)); - tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; - } - - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); - for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - u16 mode = 0; - - hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B, - (tc_valid[i] & 0x1)); - hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M, - HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); - hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B, - tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET & - 0x1); - hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M, - HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); - - req->rss_tc_mode[i] = cpu_to_le16(mode); - } - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "VF failed(=%d) to set rss tc mode\n", status); - - return status; -} - /* for revision 0x20, vf shared the same rss config with pf */ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) { #define HCLGEVF_RSS_MBX_RESP_LEN 8 - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; struct hclge_vf_to_pf_msg send_msg; u16 msg_num, hash_key_index; @@ -744,7 +542,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) int ret; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); - msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / + msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / HCLGEVF_RSS_MBX_RESP_LEN; for (index = 0; index < msg_num; index++) { send_msg.data[0] = index; @@ -761,7 +559,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) if (index == msg_num - 1) memcpy(&rss_cfg->rss_hash_key[hash_key_index], &resp_msg[0], - HCLGEVF_RSS_KEY_SIZE - hash_key_index); + HCLGE_COMM_RSS_KEY_SIZE - hash_key_index); else memcpy(&rss_cfg->rss_hash_key[hash_key_index], &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); @@ -774,29 +572,11 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, u8 *hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i, ret; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + int ret; if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - /* Get hash algorithm */ - if (hfunc) { - switch (rss_cfg->hash_algo) { - case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: - *hfunc = ETH_RSS_HASH_TOP; - break; - case HCLGEVF_RSS_HASH_ALGO_SIMPLE: - *hfunc = ETH_RSS_HASH_XOR; - break; - default: - *hfunc = ETH_RSS_HASH_UNKNOWN; - break; - } - } - - /* Get the RSS Key required by the user */ - if (key) - memcpy(key, rss_cfg->rss_hash_key, - HCLGEVF_RSS_KEY_SIZE); + hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); } else { if (hfunc) *hfunc = ETH_RSS_HASH_TOP; @@ -805,67 +585,28 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, if (ret) return ret; memcpy(key, rss_cfg->rss_hash_key, - HCLGEVF_RSS_KEY_SIZE); + HCLGE_COMM_RSS_KEY_SIZE); } } - if (indir) - for (i = 0; i < 
hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) - indir[i] = rss_cfg->rss_indirection_tbl[i]; + hclge_comm_get_rss_indir_tbl(rss_cfg, indir, + hdev->ae_dev->dev_specs.rss_ind_tbl_size); return 0; } -static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc, - u8 *hash_algo) -{ - switch (hfunc) { - case ETH_RSS_HASH_TOP: - *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; - return 0; - case ETH_RSS_HASH_XOR: - *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; - return 0; - case ETH_RSS_HASH_NO_CHANGE: - *hash_algo = hdev->rss_cfg.hash_algo; - return 0; - default: - return -EINVAL; - } -} - static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, const u8 *key, const u8 hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - u8 hash_algo; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; int ret, i; if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo); + ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, + hfunc); if (ret) return ret; - - /* Set the RSS Hash Key if specififed by the user */ - if (key) { - ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key); - if (ret) { - dev_err(&hdev->pdev->dev, - "invalid hfunc type %u\n", hfunc); - return ret; - } - - /* Update the shadow RSS key with user specified qids */ - memcpy(rss_cfg->rss_hash_key, key, - HCLGEVF_RSS_KEY_SIZE); - } else { - ret = hclgevf_set_rss_algo_key(hdev, hash_algo, - rss_cfg->rss_hash_key); - if (ret) - return ret; - } - rss_cfg->hash_algo = hash_algo; } /* update the shadow RSS table with user specified qids */ @@ -873,179 +614,26 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, rss_cfg->rss_indirection_tbl[i] = indir[i]; /* update the hardware */ - return hclgevf_set_rss_indir_table(hdev); -} - -static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) -{ - u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGEVF_S_PORT_BIT : 0; - - if (nfc->data & RXH_L4_B_2_3) - hash_sets |= HCLGEVF_D_PORT_BIT; - else - hash_sets &= ~HCLGEVF_D_PORT_BIT; - - if (nfc->data & RXH_IP_SRC) - hash_sets |= HCLGEVF_S_IP_BIT; - else - hash_sets &= ~HCLGEVF_S_IP_BIT; - - if (nfc->data & RXH_IP_DST) - hash_sets |= HCLGEVF_D_IP_BIT; - else - hash_sets &= ~HCLGEVF_D_IP_BIT; - - if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) - hash_sets |= HCLGEVF_V_TAG_BIT; - - return hash_sets; -} - -static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle, - struct ethtool_rxnfc *nfc, - struct hclgevf_rss_input_tuple_cmd *req) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - u8 tuple_sets; - - req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; - req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; - req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; - req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; - req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; - req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; - req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; - req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; - - tuple_sets = hclgevf_get_rss_hash_bits(nfc); - switch (nfc->flow_type) { - case TCP_V4_FLOW: - req->ipv4_tcp_en = tuple_sets; - break; - case TCP_V6_FLOW: - req->ipv6_tcp_en = tuple_sets; - break; - case UDP_V4_FLOW: - req->ipv4_udp_en = tuple_sets; - break; - case UDP_V6_FLOW: - req->ipv6_udp_en = tuple_sets; - break; - case SCTP_V4_FLOW: - req->ipv4_sctp_en = tuple_sets; - break; - case SCTP_V6_FLOW: - if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && - (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) - return -EINVAL; - - req->ipv6_sctp_en = tuple_sets; - break; - case IPV4_FLOW: - req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - break; - case IPV6_FLOW: - req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - break; - default: - return -EINVAL; - } - - return 0; + return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, + rss_cfg->rss_indirection_tbl); } static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, struct ethtool_rxnfc *nfc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - struct hclgevf_rss_input_tuple_cmd *req; - struct hclgevf_desc desc; int ret; if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) return -EOPNOTSUPP; - if (nfc->data & - ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) - return -EINVAL; - - req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); - - ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to init rss tuple cmd, ret = %d\n", ret); - return ret; - } - - ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, + &hdev->rss_cfg, nfc); + if (ret) dev_err(&hdev->pdev->dev, - "Set rss tuple fail, status = %d\n", ret); - return ret; - } - - rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; - rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; - rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; - rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; - rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; - rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; - rss_cfg->rss_tuple_sets.ipv6_sctp_en = 
req->ipv6_sctp_en; - rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; - return 0; -} - -static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev, - int flow_type, u8 *tuple_sets) -{ - switch (flow_type) { - case TCP_V4_FLOW: - *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en; - break; - case UDP_V4_FLOW: - *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en; - break; - case TCP_V6_FLOW: - *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en; - break; - case UDP_V6_FLOW: - *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en; - break; - case SCTP_V4_FLOW: - *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en; - break; - case SCTP_V6_FLOW: - *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en; - break; - case IPV4_FLOW: - case IPV6_FLOW: - *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; - break; - default: - return -EINVAL; - } - - return 0; -} - -static u64 hclgevf_convert_rss_tuple(u8 tuple_sets) -{ - u64 tuple_data = 0; - - if (tuple_sets & HCLGEVF_D_PORT_BIT) - tuple_data |= RXH_L4_B_2_3; - if (tuple_sets & HCLGEVF_S_PORT_BIT) - tuple_data |= RXH_L4_B_0_1; - if (tuple_sets & HCLGEVF_D_IP_BIT) - tuple_data |= RXH_IP_DST; - if (tuple_sets & HCLGEVF_S_IP_BIT) - tuple_data |= RXH_IP_SRC; + "failed to set rss tuple, ret = %d.\n", ret); - return tuple_data; + return ret; } static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, @@ -1060,47 +648,20 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, nfc->data = 0; - ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type, - &tuple_sets); + ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, + &tuple_sets); if (ret || !tuple_sets) return ret; - nfc->data = hclgevf_convert_rss_tuple(tuple_sets); + nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); return 0; } -static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, - struct hclgevf_rss_cfg *rss_cfg) -{ - struct hclgevf_rss_input_tuple_cmd *req; - struct hclgevf_desc desc; - int ret; - - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); - - req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; - - req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; - req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; - req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; - req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; - req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; - req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; - req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; - req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; - - ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Configure rss input fail, status = %d\n", ret); - return ret; -} - static int hclgevf_get_tc_size(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; return rss_cfg->rss_size; } @@ -1273,12 +834,11 @@ static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, u16 stream_id, bool enable) { struct hclgevf_cfg_com_tqp_queue_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE, - false); + hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 
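	/* The ring id was just masked to the descriptor field width before
	 * the cpu_to_le16() conversion; assuming HCLGEVF_RING_ID_MASK is the
	 * usual 10-bit 0x3ff, a tqp_id of 1025 would silently truncate to 1.
	 */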
req->stream_id = cpu_to_le16(stream_id); if (enable) @@ -1302,18 +862,6 @@ static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) return 0; } -static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hclgevf_tqp *tqp; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q); - memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); - } -} - static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) { struct hclge_vf_to_pf_msg send_msg; @@ -1514,15 +1062,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, struct list_head *list, enum HCLGEVF_MAC_ADDR_TYPE mac_type) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclgevf_mac_addr_node *mac_node, *tmp; int ret; list_for_each_entry_safe(mac_node, tmp, list, node) { ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); if (ret) { + hnae3_format_mac_addr(format_mac_addr, + mac_node->mac_addr); dev_err(&hdev->pdev->dev, - "failed to configure mac %pM, state = %d, ret = %d\n", - mac_node->mac_addr, mac_node->state, ret); + "failed to configure mac %s, state = %d, ret = %d\n", + format_mac_addr, mac_node->state, ret); return; } if (mac_node->state == HCLGEVF_MAC_TO_ADD) { @@ -1859,13 +1410,13 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) int ret; if (hdev->reset_type == HNAE3_VF_RESET) - ret = readl_poll_timeout(hdev->hw.io_base + + ret = readl_poll_timeout(hdev->hw.hw.io_base + HCLGEVF_VF_RST_ING, val, !(val & HCLGEVF_VF_RST_ING_BIT), HCLGEVF_RESET_WAIT_US, HCLGEVF_RESET_WAIT_TIMEOUT_US); else - ret = readl_poll_timeout(hdev->hw.io_base + + ret = readl_poll_timeout(hdev->hw.hw.io_base + HCLGEVF_RST_ING, val, !(val & HCLGEVF_RST_ING_BITS), HCLGEVF_RESET_WAIT_US, @@ -1891,13 +1442,13 @@ static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) { u32 reg_val; - reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG); + reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); if (enable) reg_val |= HCLGEVF_NIC_SW_RST_RDY; else reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG, + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); } @@ -1948,7 +1499,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) hdev->rst_stats.vf_func_rst_cnt++; } - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); /* inform hardware that preparatory work is done */ msleep(HCLGEVF_RESET_SYNC_TIME); hclgevf_reset_handshake(hdev, true); @@ -1977,9 +1528,9 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); + hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG)); + hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); @@ -2163,24 +1714,20 @@ static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, int retry_cnt = 0; int ret; -retry: - 
down(&hdev->reset_sem); - set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); - hdev->reset_type = rst_type; - ret = hclgevf_reset_prepare(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", - ret); - if (hdev->reset_pending || - retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { - dev_err(&hdev->pdev->dev, - "reset_pending:0x%lx, retry_cnt:%d\n", - hdev->reset_pending, retry_cnt); - clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); - up(&hdev->reset_sem); - msleep(HCLGEVF_RESET_RETRY_WAIT_MS); - goto retry; - } + while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { + down(&hdev->reset_sem); + set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclgevf_reset_prepare(hdev); + if (!ret && !hdev->reset_pending) + break; + + dev_err(&hdev->pdev->dev, + "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", + ret, hdev->reset_pending, retry_cnt); + clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + msleep(HCLGEVF_RESET_RETRY_WAIT_MS); } /* disable misc vector before reset done */ @@ -2220,7 +1767,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) vector->vector_irq = pci_irq_vector(hdev->pdev, HCLGEVF_MISC_VECTOR_NUM); - vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; + vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; /* vector status always valid for Vector 0 */ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; @@ -2341,7 +1888,7 @@ static void hclgevf_keep_alive(struct hclgevf_dev *hdev) struct hclge_vf_to_pf_msg send_msg; int ret; - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) return; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); @@ -2378,7 +1925,7 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) } if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) - hclgevf_tqps_update_stats(handle); + hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); /* VF does not need to request link status when this bit is set, because * PF will push its link status to VFs when link status changed. 
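The bounded loop that replaces the goto-based retry in hclgevf_reset_prepare_general() above keeps one subtlety of the old flow: on success it breaks out while still holding reset_sem, just as the goto version fell through with the semaphore held, and only the failure path releases it before sleeping and retrying. A condensed, standalone sketch of that control flow (the stub names and the fail-twice pattern are assumptions):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define RESET_RETRY_CNT 5

/* stand-in for hclgevf_reset_prepare(): fail twice, then succeed */
static int prepare(int attempt)
{
	return attempt < 2 ? -EBUSY : 0;
}

int main(void)
{
	bool reset_pending = false;
	int retry_cnt = 0;
	int ret = -EBUSY;

	while (retry_cnt++ < RESET_RETRY_CNT) {
		/* down(&reset_sem); set_bit(RST_HANDLING, &state); */
		ret = prepare(retry_cnt - 1);
		if (!ret && !reset_pending)
			break;	/* success: leave with the semaphore held */
		/* clear_bit(RST_HANDLING, &state); up(&reset_sem);
		 * msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
		 */
	}
	printf("prepared after %d attempt(s), ret = %d\n", retry_cnt, ret);
	return 0;
}

The loop also caps the number of attempts unconditionally, where the old goto path could in principle keep retrying as long as a reset stayed pending.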
@@ -2419,7 +1966,7 @@ static void hclgevf_service_task(struct work_struct *work) static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) { - hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); } static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, @@ -2429,14 +1976,14 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, /* fetch the events from their corresponding regs */ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, - HCLGEVF_VECTOR0_CMDQ_STATE_REG); + HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); dev_info(&hdev->pdev->dev, "receive reset interrupt 0x%x!\n", rst_ing_reg); set_bit(HNAE3_VF_RESET, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); hdev->rst_stats.vf_rst_cnt++; /* set up VF hardware reset status, its PF will clear @@ -2560,8 +2107,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) roce->rinfo.base_vector = hdev->roce_base_msix_offset; roce->rinfo.netdev = nic->kinfo.netdev; - roce->rinfo.roce_io_base = hdev->hw.io_base; - roce->rinfo.roce_mem_base = hdev->hw.mem_base; + roce->rinfo.roce_io_base = hdev->hw.hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; @@ -2573,13 +2120,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) static int hclgevf_config_gro(struct hclgevf_dev *hdev) { struct hclgevf_cfg_gro_status_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int ret; if (!hnae3_dev_gro_supported(hdev)) return 0; - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, + hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; @@ -2593,71 +2140,37 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev) return ret; } -static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) -{ - u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size; - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - struct hclgevf_rss_tuple_cfg *tuple_sets; - u32 i; - - rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; - rss_cfg->rss_size = hdev->nic.kinfo.rss_size; - tuple_sets = &rss_cfg->rss_tuple_sets; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - u8 *rss_ind_tbl; - - rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; - - rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size, - sizeof(*rss_ind_tbl), GFP_KERNEL); - if (!rss_ind_tbl) - return -ENOMEM; - - rss_cfg->rss_indirection_tbl = rss_ind_tbl; - memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, - HCLGEVF_RSS_KEY_SIZE); - - tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; - tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - tuple_sets->ipv6_sctp_en = - hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 
- HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : - HCLGEVF_RSS_INPUT_TUPLE_SCTP; - tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; - } - - /* Initialize RSS indirect table */ - for (i = 0; i < rss_ind_tbl_size; i++) - rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; - - return 0; -} - static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) { - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; + u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; int ret; if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, - rss_cfg->rss_hash_key); + ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, + rss_cfg->rss_algo, + rss_cfg->rss_hash_key); if (ret) return ret; - ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); + ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw, + false, rss_cfg); if (ret) return ret; } - ret = hclgevf_set_rss_indir_table(hdev); + ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, + rss_cfg->rss_indirection_tbl); if (ret) return ret; - return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size); + hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map, + tc_offset, tc_valid, tc_size); + + return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, + tc_valid, tc_size); } static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) @@ -2711,7 +2224,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); - hclgevf_reset_tqp_stats(handle); + hclge_comm_reset_tqp_stats(handle); hclgevf_request_link_info(hdev); @@ -2729,7 +2242,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) if (hdev->reset_type != HNAE3_VF_RESET) hclgevf_reset_tqp(handle); - hclgevf_reset_tqp_stats(handle); + hclge_comm_reset_tqp_stats(handle); hclgevf_update_link_status(hdev, 0); } @@ -3043,11 +2556,11 @@ static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) return 0; - hw->mem_base = devm_ioremap_wc(&pdev->dev, - pci_resource_start(pdev, - HCLGEVF_MEM_BAR), - pci_resource_len(pdev, HCLGEVF_MEM_BAR)); - if (!hw->mem_base) { + hw->hw.mem_base = + devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGEVF_MEM_BAR), + pci_resource_len(pdev, HCLGEVF_MEM_BAR)); + if (!hw->hw.mem_base) { dev_err(&pdev->dev, "failed to map device memory\n"); return -EFAULT; } @@ -3081,9 +2594,8 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->hdev = hdev; - hw->io_base = pci_iomap(pdev, 2, 0); - if (!hw->io_base) { + hw->hw.io_base = pci_iomap(pdev, 2, 0); + if (!hw->hw.io_base) { dev_err(&pdev->dev, "can't map configuration register space\n"); ret = -ENOMEM; goto err_clr_master; @@ -3096,7 +2608,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) return 0; err_unmap_io_base: - pci_iounmap(pdev, hdev->hw.io_base); + pci_iounmap(pdev, hdev->hw.hw.io_base); err_clr_master: pci_clear_master(pdev); pci_release_regions(pdev); @@ -3110,10 +2622,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - if (hdev->hw.mem_base) - devm_iounmap(&pdev->dev, hdev->hw.mem_base); + if (hdev->hw.hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); - pci_iounmap(pdev, hdev->hw.io_base); + pci_iounmap(pdev, hdev->hw.hw.io_base); 
pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); @@ -3122,10 +2634,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) { struct hclgevf_query_res_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int ret; - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true); + hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true); ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -3179,13 +2691,13 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) ae_dev->dev_specs.max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; - ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; + ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; } static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, - struct hclgevf_desc *desc) + struct hclge_desc *desc) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hclgevf_dev_specs_0_cmd *req0; @@ -3212,7 +2724,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) if (!dev_specs->rss_ind_tbl_size) dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; if (!dev_specs->rss_key_size) - dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; + dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; if (!dev_specs->max_int_gl) dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; if (!dev_specs->max_frm_size) @@ -3221,7 +2733,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) { - struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; + struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; int ret; int i; @@ -3235,11 +2747,10 @@ static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { hclgevf_cmd_setup_basic_desc(&desc[i], - HCLGEVF_OPC_QUERY_DEV_SPECS, true); - desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT); + HCLGE_OPC_QUERY_DEV_SPECS, true); + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); } - hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS, - true); + hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); if (ret) @@ -3318,7 +2829,10 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - ret = hclgevf_cmd_init(hdev); + hclgevf_arq_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); if (ret) { dev_err(&pdev->dev, "cmd failed %d\n", ret); return ret; @@ -3364,11 +2878,14 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_devlink_init; - ret = hclgevf_cmd_queue_init(hdev); + ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) goto err_cmd_queue_init; - ret = hclgevf_cmd_init(hdev); + hclgevf_arq_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, + &hdev->fw_version, false, + hdev->reset_pending); if (ret) goto err_cmd_init; @@ -3421,7 +2938,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; /* Initialize RSS for this VF */ - ret = hclgevf_rss_init_cfg(hdev); + ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev, + &hdev->rss_cfg); if (ret) { dev_err(&pdev->dev, 
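The PCI hunks above fold io_base/mem_base into the nested hclge_comm_hw and map the optional device-memory BAR write-combining. A condensed sketch of that probe-time mapping, with the memory BAR number and helper name as illustrative stand-ins (the -ENOMEM/-EFAULT codes mirror the driver's choices):

/*
 * Hypothetical helper: BAR 2 for register I/O via pci_iomap(), an
 * optional memory BAR mapped write-combining with devm_ioremap_wc().
 * MY_MEM_BAR is a stand-in, not a driver constant.
 */
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/pci.h>

#define MY_MEM_BAR 4

static int map_bars(struct pci_dev *pdev, void __iomem **io_base,
		    void __iomem **mem_base)
{
	*io_base = pci_iomap(pdev, 2, 0);
	if (!*io_base)
		return -ENOMEM;

	if (pci_select_bars(pdev, IORESOURCE_MEM) & BIT(MY_MEM_BAR)) {
		*mem_base = devm_ioremap_wc(&pdev->dev,
					    pci_resource_start(pdev, MY_MEM_BAR),
					    pci_resource_len(pdev, MY_MEM_BAR));
		if (!*mem_base) {
			pci_iounmap(pdev, *io_base);
			return -EFAULT;
		}
	}
	return 0;
}

The devm_ mapping is released automatically on driver detach, though the driver above still unmaps it eagerly in hclgevf_pci_uninit() alongside the pci_iounmap() of the register window.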
"failed to init rss cfg, ret = %d\n", ret); goto err_config; @@ -3468,7 +2986,7 @@ err_misc_irq_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); err_cmd_init: - hclgevf_cmd_uninit(hdev); + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); err_cmd_queue_init: hclgevf_devlink_uninit(hdev); err_devlink_init: @@ -3492,7 +3010,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) hclgevf_uninit_msi(hdev); } - hclgevf_cmd_uninit(hdev); + hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); hclgevf_devlink_uninit(hdev); hclgevf_pci_uninit(hdev); hclgevf_uninit_mac_list(hdev); @@ -3595,6 +3113,9 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; + u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; u16 cur_rss_size = kinfo->rss_size; u16 cur_tqps = kinfo->num_tqps; u32 *rss_indir; @@ -3603,7 +3124,10 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, hclgevf_update_rss_size(handle, new_tqps_num); - ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size); + hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map, + tc_offset, tc_valid, tc_size); + ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, + tc_valid, tc_size); if (ret) return ret; @@ -3704,7 +3228,7 @@ static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); } static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) @@ -3862,7 +3386,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, .get_sset_count = hclgevf_get_sset_count, - .get_rss_key_size = hclgevf_get_rss_key_size, + .get_rss_key_size = hclge_comm_get_rss_key_size, .get_rss = hclgevf_get_rss, .set_rss = hclgevf_set_rss, .get_rss_tuple = hclgevf_get_rss_tuple, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index f6f736c0091c..502ca1ce1a90 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -10,6 +10,8 @@ #include "hclge_mbx.h" #include "hclgevf_cmd.h" #include "hnae3.h" +#include "hclge_comm_rss.h" +#include "hclge_comm_tqp_stats.h" #define HCLGEVF_MOD_VERSION "1.0" #define HCLGEVF_DRIVER_NAME "hclgevf" @@ -32,21 +34,6 @@ #define HCLGEVF_VECTOR_REG_OFFSET 0x4 #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 -/* bar registers for cmdq */ -#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C -#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 - -#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 -#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C - /* bar registers for common func */ #define HCLGEVF_GRO_EN_REG 0x28000 #define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008 @@ -86,10 +73,6 @@ #define HCLGEVF_TQP_INTR_GL2_REG 0x20300 #define HCLGEVF_TQP_INTR_RL_REG 0x20900 -/* Vector0 
interrupt CMDQ event source register(RW) */ -#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 -/* Vector0 interrupt CMDQ event status register(RO) */ -#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 /* RST register bits for RESET event */ @@ -112,27 +95,16 @@ #define HCLGEVF_WAIT_RESET_DONE 100 #define HCLGEVF_RSS_IND_TBL_SIZE 512 -#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff -#define HCLGEVF_RSS_KEY_SIZE 40 -#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0 -#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1 -#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2 -#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf - -#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) -#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) -#define HCLGEVF_D_PORT_BIT BIT(0) -#define HCLGEVF_S_PORT_BIT BIT(1) -#define HCLGEVF_D_IP_BIT BIT(2) -#define HCLGEVF_S_IP_BIT BIT(3) -#define HCLGEVF_V_TAG_BIT BIT(4) -#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \ - (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT) #define HCLGEVF_MAC_MAX_FRAME 9728 #define HCLGEVF_STATS_TIMER_INTERVAL 36U +#define hclgevf_read_dev(a, reg) \ + hclge_comm_read_reg((a)->hw.io_base, reg) +#define hclgevf_write_dev(a, reg, value) \ + hclge_comm_write_reg((a)->hw.io_base, reg, value) + enum hclgevf_evt_cause { HCLGEVF_VECTOR0_EVENT_RST, HCLGEVF_VECTOR0_EVENT_MBX, @@ -154,7 +126,6 @@ enum hclgevf_states { HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, - HCLGEVF_STATE_CMD_DISABLE, HCLGEVF_STATE_LINK_UPDATING, HCLGEVF_STATE_PROMISC_CHANGED, HCLGEVF_STATE_RST_FAIL, @@ -173,29 +144,9 @@ struct hclgevf_mac { }; struct hclgevf_hw { - void __iomem *io_base; - void __iomem *mem_base; + struct hclge_comm_hw hw; int num_vec; - struct hclgevf_cmq cmq; struct hclgevf_mac mac; - void *hdev; /* hchgevf device it is part of */ -}; - -/* TQP stats */ -struct hlcgevf_tqp_stats { - /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */ - u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ - /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */ - u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ -}; - -struct hclgevf_tqp { - struct device *dev; /* device for DMA mapping */ - struct hnae3_queue q; - struct hlcgevf_tqp_stats tqp_stats; - u16 index; /* global index in a NIC controller */ - - bool alloced; }; struct hclgevf_cfg { @@ -208,27 +159,6 @@ struct hclgevf_cfg { u32 numa_node_map; }; -struct hclgevf_rss_tuple_cfg { - u8 ipv4_tcp_en; - u8 ipv4_udp_en; - u8 ipv4_sctp_en; - u8 ipv4_fragment_en; - u8 ipv6_tcp_en; - u8 ipv6_udp_en; - u8 ipv6_sctp_en; - u8 ipv6_fragment_en; -}; - -struct hclgevf_rss_cfg { - u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */ - u32 hash_algo; - u32 rss_size; - u8 hw_tc_map; - /* shadow table */ - u8 *rss_indirection_tbl; - struct hclgevf_rss_tuple_cfg rss_tuple_sets; -}; - struct hclgevf_misc_vector { u8 __iomem *addr; int vector_irq; @@ -273,7 +203,7 @@ struct hclgevf_dev { struct hnae3_ae_dev *ae_dev; struct hclgevf_hw hw; struct hclgevf_misc_vector misc_vector; - struct hclgevf_rss_cfg rss_cfg; + struct hclge_comm_rss_cfg rss_cfg; unsigned long state; unsigned long flr_state; unsigned long default_reset_request; @@ -324,7 +254,7 @@ struct hclgevf_dev { struct delayed_work service_task; - struct hclgevf_tqp *htqp; + struct hclge_comm_tqp *htqp; struct hnae3_handle nic; struct hnae3_handle roce; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 
fdc66fae0960..d5e0a3f762f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -53,7 +53,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, } while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) return -EIO; usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2); @@ -97,7 +98,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u8 *resp_data, u16 resp_len) { struct hclge_mbx_vf_to_pf_cmd *req; - struct hclgevf_desc desc; + struct hclge_desc desc; int status; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; @@ -114,7 +115,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg)); - trace_hclge_vf_mbx_send(hdev, req); + if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state)) + trace_hclge_vf_mbx_send(hdev, req); /* synchronous send */ if (need_resp) { @@ -150,9 +152,9 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) { - u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG); + u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG); - return tail == hw->cmq.crq.next_to_use; + return tail == hw->hw.cmq.crq.next_to_use; } static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, @@ -211,14 +213,15 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, void hclgevf_mbx_handler(struct hclgevf_dev *hdev) { struct hclge_mbx_pf_to_vf_cmd *req; - struct hclgevf_cmq_ring *crq; - struct hclgevf_desc *desc; + struct hclge_comm_cmq_ring *crq; + struct hclge_desc *desc; u16 flag; - crq = &hdev->hw.cmq.crq; + crq = &hdev->hw.hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { dev_info(&hdev->pdev->dev, "vf crq need init\n"); return; } @@ -268,7 +271,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ - hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG, + hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, crq->next_to_use); } @@ -295,7 +298,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) /* process all the async queue messages */ while (tail != hdev->arq.head) { - if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, + &hdev->hw.hw.comm_state)) { dev_info(&hdev->pdev->dev, "vf crq need init in async\n"); return; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index a85667078b72..93192f58ac88 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -547,7 +547,9 @@ static void hinic_get_drvinfo(struct net_device *netdev, } static void hinic_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct hinic_dev *nic_dev = netdev_priv(netdev); @@ -580,7 +582,9 @@ static int check_ringparam_valid(struct hinic_dev *nic_dev, } static int hinic_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, 
+ struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct hinic_dev *nic_dev = netdev_priv(netdev); u16 new_sq_depth, new_rq_depth; @@ -1205,8 +1209,6 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev) return HINIC_RSS_INDIR_SIZE; } -#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0]))) - #define HINIC_FUNC_STAT(_stat_item) { \ .name = #_stat_item, \ .size = sizeof_field(struct hinic_vport_stats, _stat_item), \ @@ -1374,7 +1376,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data) break; hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); - for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) { + for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) { p = (char *)&txq_stats + hinic_tx_queue_stats[j].offset; data[i] = (hinic_tx_queue_stats[j].size == @@ -1387,7 +1389,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data) break; hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); - for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) { + for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) { p = (char *)&rxq_stats + hinic_rx_queue_stats[j].offset; data[i] = (hinic_rx_queue_stats[j].size == @@ -1411,7 +1413,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev, netif_err(nic_dev, drv, netdev, "Failed to get vport stats from firmware\n"); - for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) { + for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) { p = (char *)&vport_stats + hinic_function_stats[j].offset; data[i] = (hinic_function_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; @@ -1420,8 +1422,8 @@ static void hinic_get_ethtool_stats(struct net_device *netdev, port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); if (!port_stats) { memset(&data[i], 0, - ARRAY_LEN(hinic_port_stats) * sizeof(*data)); - i += ARRAY_LEN(hinic_port_stats); + ARRAY_SIZE(hinic_port_stats) * sizeof(*data)); + i += ARRAY_SIZE(hinic_port_stats); goto get_drv_stats; } @@ -1430,7 +1432,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev, netif_err(nic_dev, drv, netdev, "Failed to get port stats from firmware\n"); - for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) { + for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) { p = (char *)port_stats + hinic_port_stats[j].offset; data[i] = (hinic_port_stats[j].size == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; @@ -1449,14 +1451,14 @@ static int hinic_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_TEST: - return ARRAY_LEN(hinic_test_strings); + return ARRAY_SIZE(hinic_test_strings); case ETH_SS_STATS: q_num = nic_dev->num_qps; - count = ARRAY_LEN(hinic_function_stats) + - (ARRAY_LEN(hinic_tx_queue_stats) + - ARRAY_LEN(hinic_rx_queue_stats)) * q_num; + count = ARRAY_SIZE(hinic_function_stats) + + (ARRAY_SIZE(hinic_tx_queue_stats) + + ARRAY_SIZE(hinic_rx_queue_stats)) * q_num; - count += ARRAY_LEN(hinic_port_stats); + count += ARRAY_SIZE(hinic_port_stats); return count; default: @@ -1476,27 +1478,27 @@ static void hinic_get_strings(struct net_device *netdev, memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings)); return; case ETH_SS_STATS: - for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) { + for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) { memcpy(p, hinic_function_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) { + for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) { memcpy(p, hinic_port_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < nic_dev->num_qps; i++) { - for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) { + for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) { sprintf(p, hinic_tx_queue_stats[j].name, i); p += ETH_GSTRING_LEN; } } for (i = 0; i < nic_dev->num_qps; i++) { - for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) { + for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) { sprintf(p, hinic_rx_queue_stats[j].name, i); p += ETH_GSTRING_LEN; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 06586173add7..998717f02136 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -814,7 +814,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, { struct hinic_hwif *hwif = attr->hwif; struct pci_dev *pdev = hwif->pdev; - size_t cell_ctxt_size; chain->hwif = hwif; chain->chain_type = attr->chain_type; @@ -826,8 +825,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, sema_init(&chain->sem, 1); - cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); - chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); + chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells, + sizeof(*chain->cell_ctxt), GFP_KERNEL); if (!chain->cell_ctxt) return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 307a6d4af993..a627237f694b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -796,11 +796,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, struct hinic_cmdq_ctxt *cmdq_ctxts; struct pci_dev *pdev = hwif->pdev; struct hinic_pfhwdev *pfhwdev; - size_t cmdq_ctxts_size; int err; - cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts); - cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL); + cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, + sizeof(*cmdq_ctxts), GFP_KERNEL); if (!cmdq_ctxts) return -ENOMEM; @@ -884,7 +883,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); struct pci_dev *pdev = hwif->pdev; struct hinic_hwdev *hwdev; - size_t saved_wqs_size; u16 
max_wqe_size; int err; @@ -895,8 +893,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, if (!cmdqs->cmdq_buf_pool) return -ENOMEM; - saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); - cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL); + cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, + sizeof(*cmdqs->saved_wqs), GFP_KERNEL); if (!cmdqs->saved_wqs) { err = -ENOMEM; goto err_saved_wqs; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 657a15447bd0..2127a48749a8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -162,7 +162,6 @@ static int init_msix(struct hinic_hwdev *hwdev) struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; int nr_irqs, num_aeqs, num_ceqs; - size_t msix_entries_size; int i, err; num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); @@ -171,8 +170,8 @@ static int init_msix(struct hinic_hwdev *hwdev) if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); - msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries); - hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size, + hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs, + sizeof(*hwdev->msix_entries), GFP_KERNEL); if (!hwdev->msix_entries) return -ENOMEM; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index d3fc05a07fdb..045c47786a04 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq) struct hinic_hwif *hwif = eq->hwif; struct pci_dev *pdev = hwif->pdev; u32 init_val, addr, val; - size_t addr_size; int err, pg; - addr_size = eq->num_pages * sizeof(*eq->dma_addr); - eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages, + sizeof(*eq->dma_addr), GFP_KERNEL); if (!eq->dma_addr) return -ENOMEM; - addr_size = eq->num_pages * sizeof(*eq->virt_addr); - eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages, + sizeof(*eq->virt_addr), GFP_KERNEL); if (!eq->virt_addr) { err = -ENOMEM; goto err_virt_addr_alloc; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index a6e43d686293..c4a0ba6e183a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -375,31 +375,30 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; - size_t qps_size, wq_size, db_size; void *ci_addr_base; int i, j, err; - qps_size = num_qps * sizeof(*func_to_io->qps); - func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL); + func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->qps), GFP_KERNEL); if (!func_to_io->qps) return -ENOMEM; - wq_size = num_qps * sizeof(*func_to_io->sq_wq); - func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->sq_wq), GFP_KERNEL); if (!func_to_io->sq_wq) { err = -ENOMEM; goto err_sq_wq; } - wq_size = num_qps * sizeof(*func_to_io->rq_wq); - func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + 
func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->rq_wq), GFP_KERNEL); if (!func_to_io->rq_wq) { err = -ENOMEM; goto err_rq_wq; } - db_size = num_qps * sizeof(*func_to_io->sq_db); - func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL); + func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps, + sizeof(*func_to_io->sq_db), GFP_KERNEL); if (!func_to_io->sq_db) { err = -ENOMEM; goto err_sq_db; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 7f0f1aa3cedd..2d9b06d7caad 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -193,20 +193,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; - size_t size; - size = wqs->num_pages * sizeof(*wqs->page_paddr); - wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->page_paddr), GFP_KERNEL); if (!wqs->page_paddr) return -ENOMEM; - size = wqs->num_pages * sizeof(*wqs->page_vaddr); - wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->page_vaddr), GFP_KERNEL); if (!wqs->page_vaddr) goto err_page_vaddr; - size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); - wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, + sizeof(*wqs->shadow_page_vaddr), + GFP_KERNEL); if (!wqs->shadow_page_vaddr) goto err_page_shadow_vaddr; @@ -379,15 +379,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq) { struct hinic_hwif *hwif = wq->hwif; struct pci_dev *pdev = hwif->pdev; - size_t size; - size = wq->num_q_pages * wq->max_wqe_size; - wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages, + wq->max_wqe_size, GFP_KERNEL); if (!wq->shadow_wqe) return -ENOMEM; - size = wq->num_q_pages * sizeof(wq->prod_idx); - wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages, + sizeof(wq->prod_idx), GFP_KERNEL); if (!wq->shadow_idx) goto err_shadow_idx; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index f9a766b8ac43..05329292d940 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -144,13 +144,12 @@ static int create_txqs(struct hinic_dev *nic_dev) { int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t txq_size; if (nic_dev->txqs) return -EINVAL; - txq_size = num_txqs * sizeof(*nic_dev->txqs); - nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); + nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs, + sizeof(*nic_dev->txqs), GFP_KERNEL); if (!nic_dev->txqs) return -ENOMEM; @@ -241,13 +240,12 @@ static int create_rxqs(struct hinic_dev *nic_dev) { int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; - size_t rxq_size; if (nic_dev->rxqs) return -EINVAL; - rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); - nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL); + nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs, + sizeof(*nic_dev->rxqs), GFP_KERNEL); if (!nic_dev->rxqs) return -ENOMEM; @@ 
-1394,12 +1392,8 @@ static int hinic_probe(struct pci_dev *pdev, err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { - dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Failed to set DMA mask\n"); - goto err_dma_mask; - } + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + goto err_dma_mask; } err = nic_dev_init(pdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index a78c398bf5b2..01e7d3c0b68e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> +#include <linux/module.h> #include "hinic_hw_dev.h" #include "hinic_dev.h" diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index c5bdb0d374ef..8d59babbf476 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -4,6 +4,7 @@ * Copyright(c) 2017 Huawei Technologies Co., Ltd */ +#include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/u64_stats_sync.h> @@ -862,7 +863,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; int err, irqname_len; - size_t sges_size; txq->netdev = netdev; txq->sq = sq; @@ -871,13 +871,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, txq->max_sges = HINIC_MAX_SQ_BUFDESCS; - sges_size = txq->max_sges * sizeof(*txq->sges); - txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges, + sizeof(*txq->sges), GFP_KERNEL); if (!txq->sges) return -ENOMEM; - sges_size = txq->max_sges * sizeof(*txq->free_sges); - txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges, + sizeof(*txq->free_sges), GFP_KERNEL); if (!txq->free_sges) { err = -ENOMEM; goto err_alloc_free_sges; diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index b482f6f633bd..3ee89ae496d0 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1178,7 +1178,8 @@ found: DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr)); for (i = 0; i < 6; i++) - DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i])); + DEB(DEB_PROBE,printk(" %2.2X", eth_addr[i])); + eth_hw_addr_set(dev, eth_addr); DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq)); diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c index 48e001881c75..0af70094aba3 100644 --- a/drivers/net/ethernet/i825xx/lasi_82596.c +++ b/drivers/net/ethernet/i825xx/lasi_82596.c @@ -147,6 +147,7 @@ lan_init_chip(struct parisc_device *dev) struct net_device *netdevice; struct i596_private *lp; int retval = -ENOMEM; + u8 addr[ETH_ALEN]; int i; if (!dev->irq) { @@ -167,13 +168,14 @@ lan_init_chip(struct parisc_device *dev) netdevice->base_addr = dev->hpa.start; netdevice->irq = dev->irq; - if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) { + if (pdc_lan_station_id(addr, netdevice->base_addr)) { for (i = 0; i < 6; i++) { - netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i); + addr[i] = 
gsc_readb(LAN_PROM_ADDR + i); } printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__); } + eth_hw_addr_set(netdevice, addr); lp = netdev_priv(netdevice); lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 6b3fc8823c54..fbea9f7efe8c 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2137,8 +2137,11 @@ emac_ethtool_set_link_ksettings(struct net_device *ndev, return 0; } -static void emac_ethtool_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *rp) +static void +emac_ethtool_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *rp, + struct kernel_ethtool_ringparam *kernel_rp, + struct netlink_ext_ack *extack) { rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF; rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 45ba40cf4d07..22fb0d109a68 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1890,6 +1890,7 @@ static struct attribute *veth_pool_attrs[] = { &veth_size_attr, NULL, }; +ATTRIBUTE_GROUPS(veth_pool); static const struct sysfs_ops veth_pool_ops = { .show = veth_pool_show, @@ -1899,7 +1900,7 @@ static const struct sysfs_ops veth_pool_ops = { static struct kobj_type ktype_veth_pool = { .release = NULL, .sysfs_ops = &veth_pool_ops, - .default_attrs = veth_pool_attrs, + .default_groups = veth_pool_groups, }; static int ibmveth_resume(struct device *dev) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3cca51735421..59536bd5cab1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -308,7 +308,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, if (adapter->fw_done_rc) { dev_err(dev, "Couldn't map LTB, rc = %d\n", adapter->fw_done_rc); - rc = -1; + rc = -EIO; goto out; } rc = 0; @@ -540,13 +540,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; dma_addr_t stok; + int rc; stok = dma_map_single(dev, &adapter->stats, sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); - if (dma_mapping_error(dev, stok)) { - dev_err(dev, "Couldn't map stats buffer\n"); - return -1; + rc = dma_mapping_error(dev, stok); + if (rc) { + dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc); + return rc; } adapter->stats_token = stok; @@ -628,17 +630,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) old_buff_size = adapter->prev_rx_buf_sz; new_buff_size = adapter->cur_rx_buf_sz; - /* Require buff size to be exactly same for now */ - if (old_buff_size != new_buff_size) - return false; - - if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) - return true; - - if (old_num_pools < adapter->min_rx_queues || - old_num_pools > adapter->max_rx_queues || - old_pool_size < adapter->min_rx_add_entries_per_subcrq || - old_pool_size > adapter->max_rx_add_entries_per_subcrq) + if (old_buff_size != new_buff_size || + old_num_pools != new_num_pools || + old_pool_size != new_pool_size) return false; return true; @@ -663,7 +657,7 @@ static int init_rx_pools(struct net_device *netdev) u64 num_pools; u64 pool_size; /* # of buffers in one pool */ u64 buff_size; - int i, j; + int i, j, rc; pool_size = adapter->req_rx_add_entries_per_subcrq; num_pools = adapter->req_rx_queues; @@ -682,7 +676,7 @@ static int 
init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!adapter->rx_pool) { dev_err(dev, "Failed to allocate rx pools\n"); - return -1; + return -ENOMEM; } /* Set num_active_rx_pools early. If we fail below after partial @@ -705,6 +699,7 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!rx_pool->free_map) { dev_err(dev, "Couldn't alloc free_map %d\n", i); + rc = -ENOMEM; goto out_release; } @@ -713,6 +708,7 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!rx_pool->rx_buff) { dev_err(dev, "Couldn't alloc rx buffers\n"); + rc = -ENOMEM; goto out_release; } } @@ -726,8 +722,9 @@ update_ltb: dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", i, rx_pool->size, rx_pool->buff_size); - if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * rx_pool->buff_size)) + rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, + rx_pool->size * rx_pool->buff_size); + if (rc) goto out; for (j = 0; j < rx_pool->size; ++j) { @@ -764,7 +761,7 @@ out: /* We failed to allocate one or more LTBs or map them on the VIOS. * Hold onto the pools and any LTBs that we did allocate/map. */ - return -1; + return rc; } static void release_vpd_data(struct ibmvnic_adapter *adapter) @@ -825,13 +822,13 @@ static int init_one_tx_pool(struct net_device *netdev, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); if (!tx_pool->tx_buff) - return -1; + return -ENOMEM; tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); if (!tx_pool->free_map) { kfree(tx_pool->tx_buff); tx_pool->tx_buff = NULL; - return -1; + return -ENOMEM; } for (i = 0; i < pool_size; i++) @@ -874,17 +871,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) old_mtu = adapter->prev_mtu; new_mtu = adapter->req_mtu; - /* Require MTU to be exactly same to reuse pools for now */ - if (old_mtu != new_mtu) - return false; - - if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) - return true; - - if (old_num_pools < adapter->min_tx_queues || - old_num_pools > adapter->max_tx_queues || - old_pool_size < adapter->min_tx_entries_per_subcrq || - old_pool_size > adapter->max_tx_entries_per_subcrq) + if (old_mtu != new_mtu || + old_num_pools != new_num_pools || + old_pool_size != new_pool_size) return false; return true; @@ -930,7 +919,7 @@ static int init_tx_pools(struct net_device *netdev) adapter->tx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) - return -1; + return -ENOMEM; adapter->tso_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); @@ -940,7 +929,7 @@ static int init_tx_pools(struct net_device *netdev) if (!adapter->tso_pool) { kfree(adapter->tx_pool); adapter->tx_pool = NULL; - return -1; + return -ENOMEM; } /* Set num_active_tx_pools early. 
If we fail below after partial @@ -1129,7 +1118,7 @@ static int ibmvnic_login(struct net_device *netdev) retry = false; if (retry_count > retries) { netdev_warn(netdev, "Login attempts exceeded\n"); - return -1; + return -EACCES; } adapter->init_done_rc = 0; @@ -1170,25 +1159,26 @@ static int ibmvnic_login(struct net_device *netdev) timeout)) { netdev_warn(netdev, "Capabilities query timed out\n"); - return -1; + return -ETIMEDOUT; } rc = init_sub_crqs(adapter); if (rc) { netdev_warn(netdev, "SCRQ initialization failed\n"); - return -1; + return rc; } rc = init_sub_crq_irqs(adapter); if (rc) { netdev_warn(netdev, "SCRQ irq initialization failed\n"); - return -1; + return rc; } } else if (adapter->init_done_rc) { - netdev_warn(netdev, "Adapter login failed\n"); - return -1; + netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n", + adapter->init_done_rc); + return -EIO; } } while (retry); @@ -1247,7 +1237,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { netdev_err(netdev, "timeout setting link state\n"); - return -1; + return -ETIMEDOUT; } if (adapter->init_done_rc == PARTIALSUCCESS) { @@ -2058,7 +2048,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_packets++; tx_bytes += skb->len; - txq->trans_start = jiffies; + txq_trans_cond_update(txq); ret = NETDEV_TX_OK; goto out; @@ -2304,7 +2294,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, /* If someone else changed the adapter state * when we dropped the rtnl, fail the reset */ - rc = -1; + rc = -EAGAIN; goto out; } adapter->state = VNIC_CLOSED; @@ -2346,10 +2336,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, } rc = ibmvnic_reset_init(adapter, true); - if (rc) { - rc = IBMVNIC_INIT_FAILED; + if (rc) goto out; - } /* If the adapter was in PROBE or DOWN state prior to the reset, * exit here. 
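A recurring fix in the ibmvnic hunks above is replacing bare "return -1" with a specific errno (-ENOMEM, -EIO, -ETIMEDOUT, -EAGAIN, -EACCES), so callers and user space can tell allocation failures from firmware or timeout errors. A small sketch of the convention; the function and struct here are hypothetical, not driver symbols:

/* Hypothetical helper showing the errno convention adopted above. */
#include <linux/errno.h>
#include <linux/slab.h>

struct example_buf {
	void *data;
};

static int init_example_buf(struct example_buf *buf, size_t len,
			    bool fw_done, bool timed_out)
{
	if (timed_out)
		return -ETIMEDOUT;	/* was "return -1" */
	if (!fw_done)
		return -EIO;		/* was "return -1" */

	buf->data = kzalloc(len, GFP_KERNEL);
	if (!buf->data)
		return -ENOMEM;		/* was "return -1" */
	return 0;
}

The ibmvnic.h hunk drops the now-unused IBMVNIC_INIT_FAILED sentinel for the same reason: once every path returns a real errno, a private error code has nothing left to encode.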
@@ -3088,7 +3076,9 @@ static u32 ibmvnic_get_link(struct net_device *netdev) } static void ibmvnic_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); @@ -3108,7 +3098,9 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, } static int ibmvnic_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int ret; @@ -3775,7 +3767,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter) allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL); if (!allqueues) - return -1; + return -ENOMEM; for (i = 0; i < total_queues; i++) { allqueues[i] = init_sub_crq_queue(adapter); @@ -3844,7 +3836,7 @@ tx_failed: for (i = 0; i < registered_queues; i++) release_sub_crq_queue(adapter, allqueues[i], 1); kfree(allqueues); - return -1; + return -ENOMEM; } static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) @@ -4203,7 +4195,7 @@ static int send_login(struct ibmvnic_adapter *adapter) if (!adapter->tx_scrq || !adapter->rx_scrq) { netdev_err(adapter->netdev, "RX or TX queues are not allocated, device login failed\n"); - return -1; + return -ENOMEM; } release_login_buffer(adapter); @@ -4323,7 +4315,7 @@ buf_map_failed: kfree(login_buffer); adapter->login_buf = NULL; buf_alloc_failed: - return -1; + return -ENOMEM; } static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, @@ -5644,7 +5636,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Initialization sequence timed out\n"); - return -1; + return -ETIMEDOUT; } if (adapter->init_done_rc) { @@ -5655,7 +5647,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) if (adapter->from_passive_init) { adapter->state = VNIC_OPEN; adapter->from_passive_init = false; - return -1; + return -EINVAL; } if (reset && diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index b8e42f67d897..4a8f36e0ab07 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -18,8 +18,6 @@ #define IBMVNIC_NAME "ibmvnic" #define IBMVNIC_DRIVER_VERSION "1.0.1" #define IBMVNIC_INVALID_MAP -1 -#define IBMVNIC_STATS_TIMEOUT 1 -#define IBMVNIC_INIT_FAILED 2 #define IBMVNIC_OPEN_FAILED 3 /* basic structures plus 100 2k buffers */ diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 0b274d8fa45b..3facb55b7161 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -327,6 +327,16 @@ config ICE_SWITCHDEV If unsure, say N. +config ICE_HWTS + bool "Support HW cross-timestamp on platforms with PTM support" + default y + depends on ICE && X86 + help + Say Y to enable hardware supported cross-timestamping on platforms + with PCIe PTM support. The cross-timestamp is available through + the PTP clock driver precise cross-timestamp ioctl + (PTP_SYS_OFFSET_PRECISE). 
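Per the ICE_HWTS help text just added, the PTM-based cross-timestamp surfaces through the standard PTP character device. A minimal userspace sketch of querying it; the /dev/ptp0 path is illustrative, and clocks without cross-timestamp support fail the ioctl with EOPNOTSUPP:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off;
	int fd = open("/dev/ptp0", O_RDWR);	/* illustrative device path */

	if (fd < 0)
		return 1;

	memset(&off, 0, sizeof(off));
	if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off) == 0)
		printf("device %lld.%09u sys %lld.%09u\n",
		       (long long)off.device.sec, off.device.nsec,
		       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);

	close(fd);
	return 0;
}

Unlike PTP_SYS_OFFSET, which brackets each device read with two system-clock reads and estimates the midpoint, the precise variant returns a hardware-correlated pair, which is exactly what PCIe PTM provides.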
+ config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 5039a2536951..4a8013f20152 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2557,7 +2557,9 @@ static int e100_set_eeprom(struct net_device *netdev, } static void e100_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nic *nic = netdev_priv(netdev); struct param_range *rfds = &nic->params.rfds; @@ -2570,7 +2572,9 @@ static void e100_get_ringparam(struct net_device *netdev, } static int e100_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nic *nic = netdev_priv(netdev); struct param_range *rfds = &nic->params.rfds; @@ -3003,9 +3007,10 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) struct net_device *netdev = pci_get_drvdata(pdev); struct nic *nic = netdev_priv(netdev); + netif_device_detach(netdev); + if (netif_running(netdev)) e100_down(nic); - netif_device_detach(netdev); if ((nic->flags & wol_magic) | e100_asf(nic)) { /* enable reverse auto-negotiation */ @@ -3022,7 +3027,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) *enable_wake = false; } - pci_clear_master(pdev); + pci_disable_device(pdev); } static int __e100_power_off(struct pci_dev *pdev, bool wake) @@ -3042,8 +3047,6 @@ static int __maybe_unused e100_suspend(struct device *dev_d) __e100_shutdown(to_pci_dev(dev_d), &wake); - device_wakeup_disable(dev_d); - return 0; } @@ -3051,6 +3054,14 @@ static int __maybe_unused e100_resume(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct nic *nic = netdev_priv(netdev); + int err; + + err = pci_enable_device(to_pci_dev(dev_d)); + if (err) { + netdev_err(netdev, "Resume cannot enable PCI device, aborting\n"); + return err; + } + pci_set_master(to_pci_dev(dev_d)); /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { @@ -3062,10 +3073,11 @@ static int __maybe_unused e100_resume(struct device *dev_d) smartspeed & ~(E100_82552_REV_ANEG)); } - netif_device_attach(netdev); if (netif_running(netdev)) e100_up(nic); + netif_device_attach(netdev); + return 0; } diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 0a57172dfcbc..32803b0cf1e8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -539,7 +539,9 @@ static void e1000_get_drvinfo(struct net_device *netdev, } static void e1000_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -556,7 +558,9 @@ static void e1000_get_ringparam(struct net_device *netdev, } static int e1000_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; diff --git 
a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 669060a2e6aa..3f5feb55cfba 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1953,7 +1953,8 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter) static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, - struct e1000_tx_buffer *buffer_info) + struct e1000_tx_buffer *buffer_info, + int budget) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) @@ -1966,7 +1967,7 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, buffer_info->dma = 0; } if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); + napi_consume_skb(buffer_info->skb, budget); buffer_info->skb = NULL; } buffer_info->time_stamp = 0; @@ -1990,7 +1991,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; - e1000_unmap_and_free_tx_resource(adapter, buffer_info); + e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); } netdev_reset_queue(adapter->netdev); @@ -2958,7 +2959,7 @@ dma_error: i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; - e1000_unmap_and_free_tx_resource(adapter, buffer_info); + e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0); } return 0; @@ -3856,7 +3857,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, } } - e1000_unmap_and_free_tx_resource(adapter, buffer_info); + e1000_unmap_and_free_tx_resource(adapter, buffer_info, + 64); tx_desc->upper.data = 0; if (unlikely(++i == tx_ring->count)) @@ -4382,7 +4384,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (!skb) { unsigned int frag_len = e1000_frag_len(adapter); - skb = build_skb(data - E1000_HEADROOM, frag_len); + skb = napi_build_skb(data - E1000_HEADROOM, frag_len); if (!skb) { adapter->alloc_rx_buff_failed++; break; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 8515e00d1b40..b80ae9a82224 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -655,7 +655,9 @@ static void e1000_get_drvinfo(struct net_device *netdev, } static void e1000_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -666,7 +668,9 @@ static void e1000_get_ringparam(struct net_device *netdev, } static int e1000_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 44e2dc8328a2..635a95927e93 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3614,10 +3614,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return -EINVAL; - /* flags reserved for future extensions - must be zero */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git 
a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 0d37f011d0ce..d53369e30040 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -502,7 +502,9 @@ static void fm10k_set_msglevel(struct net_device *netdev, u32 data) } static void fm10k_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(netdev); @@ -517,7 +519,9 @@ static void fm10k_get_ringparam(struct net_device *netdev, } static int fm10k_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 21eff0895a7a..f6d56867f857 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -143,7 +143,7 @@ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, * @vlan: location of buffer to store VLAN * * This function pulls the MAC address back out of the attribute and will - * place it in the array pointed by by mac_addr. It will return success + * place it in the array pointed by mac_addr. It will return success * if provided with a valid pointers. **/ s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 3d528fba754b..4d939af0a626 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -161,6 +161,7 @@ enum i40e_vsi_state_t { __I40E_VSI_OVERFLOW_PROMISC, __I40E_VSI_REINIT_REQUESTED, __I40E_VSI_DOWN_REQUESTED, + __I40E_VSI_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_VSI_STATE_SIZE__, }; @@ -1247,6 +1248,7 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); int i40e_ptp_alloc_pins(struct i40e_pf *pf); +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 593912b17609..7abef88801fb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -769,21 +769,22 @@ static bool i40e_asq_done(struct i40e_hw *hw) } /** - * i40e_asq_send_command - send command to Admin Queue + * i40e_asq_send_command_atomic - send command to Admin Queue * @hw: pointer to the hw struct * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands * @buff_size: size of buffer for indirect commands * @cmd_details: pointer to command details structure + * @is_atomic_context: is the function called in an atomic context? * * This is the main send command driver routine for the Admin Queue send * queue. 
It runs the queue, cleans the queue, etc **/ -i40e_status i40e_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details) +i40e_status +i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ u16 buff_size, + struct i40e_asq_cmd_details *cmd_details, + bool is_atomic_context) { i40e_status status = 0; struct i40e_dma_mem *dma_buff = NULL; @@ -910,7 +911,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (i40e_asq_done(hw)) break; - udelay(50); + + if (is_atomic_context) + udelay(50); + else + usleep_range(40, 60); + total_delay += 50; } while (total_delay < hw->aq.asq_cmd_timeout); } @@ -967,6 +973,15 @@ asq_send_command_error: return status; } +i40e_status +i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_asq_send_command_atomic(hw, desc, buff, buff_size, + cmd_details, false); +} + /** * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 140b677f114d..60f9e0a6aaca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -11,8 +11,8 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0009 -#define I40E_FW_API_VERSION_MINOR_X710 0x0009 +#define I40E_FW_API_VERSION_MINOR_X722 0x000C +#define I40E_FW_API_VERSION_MINOR_X710 0x000F #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ I40E_FW_API_VERSION_MINOR_X710 : \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index b4d3fed0d2f2..9ddeb015eb7e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -154,8 +154,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) return "I40E_ERR_INVALID_MAC_ADDR"; case I40E_ERR_DEVICE_NOT_SUPPORTED: return "I40E_ERR_DEVICE_NOT_SUPPORTED"; - case I40E_ERR_MASTER_REQUESTS_PENDING: - return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_PRIMARY_REQUESTS_PENDING: + return "I40E_ERR_PRIMARY_REQUESTS_PENDING"; case I40E_ERR_INVALID_LINK_SETTINGS: return "I40E_ERR_INVALID_LINK_SETTINGS"; case I40E_ERR_AUTONEG_NOT_COMPLETE: @@ -2073,7 +2073,8 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, + cmd_details, true); return status; } @@ -2114,7 +2115,8 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0, + cmd_details, true); return status; } @@ -4136,7 +4138,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, struct i40e_filter_control_settings *settings) { u32 fcoe_cntx_size, fcoe_filt_size; - u32 pe_cntx_size, pe_filt_size; u32 fcoe_fmax; u32 val; @@ -4180,8 +4181,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, case I40E_HASH_FILTER_SIZE_256K: case I40E_HASH_FILTER_SIZE_512K: case I40E_HASH_FILTER_SIZE_1M: - pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; - pe_filt_size <<= (u32)settings->pe_filt_num; break; default: return I40E_ERR_PARAM; @@ -4198,8 +4197,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, case I40E_DMA_CNTX_SIZE_64K: case I40E_DMA_CNTX_SIZE_128K: case I40E_DMA_CNTX_SIZE_256K: - pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; - pe_cntx_size <<= (u32)settings->pe_cntx_num; break; default: return I40E_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 291e61ac3e44..2c1b1da1220e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } + if (vsi->type != I40E_VSI_MAIN && + vsi->type != I40E_VSI_FDIR && + vsi->type != I40E_VSI_VMDQ2) { + dev_info(&pf->pdev->dev, + "vsi %d type %d descriptor rings not available\n", + vsi_seid, vsi->type); + return; + } if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) { dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 513ba6974355..091f36adbbe1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1916,7 +1916,9 @@ static void i40e_get_drvinfo(struct net_device *netdev, } static void i40e_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam 
*ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -1944,7 +1946,9 @@ static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index) } static int i40e_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; struct i40e_netdev_priv *np = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ba862131b9bd..61afc220fc6c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2"); static struct workqueue_struct *i40e_wq; +static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, + struct net_device *netdev, int delta) +{ + struct netdev_hw_addr *ha; + + if (!f || !netdev) + return; + + netdev_for_each_mc_addr(ha, netdev) { + if (ether_addr_equal(ha->addr, f->macaddr)) { + ha->refcount += delta; + if (ha->refcount <= 0) + ha->refcount = 1; + break; + } + } +} + /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -1790,6 +1808,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, bool is_add) { struct i40e_pf *pf = vsi->back; + u16 num_tc_qps = 0; u16 sections = 0; u8 netdev_tc = 0; u16 numtc = 1; @@ -1797,13 +1816,33 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, u8 offset; u16 qmap; int i; - u16 num_tc_qps = 0; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; + /* zero out queue mapping, it will get updated on the end of the function */ + memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); + + if (vsi->type == I40E_VSI_MAIN) { + /* This code helps add more queue to the VSI if we have + * more cores than RSS can support, the higher cores will + * be served by ATR or other filters. 
Furthermore, the + * non-zero req_queue_pairs says that user requested a new + * queue count via ethtool's set_channels, so use this + * value for queues distribution across traffic classes + */ + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else if (pf->flags & I40E_FLAG_MSIX_ENABLED) + vsi->num_queue_pairs = pf->num_lan_msix; + } /* Number of queues per enabled TC */ - num_tc_qps = vsi->alloc_queue_pairs; + if (vsi->type == I40E_VSI_MAIN || + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) + num_tc_qps = vsi->num_queue_pairs; + else + num_tc_qps = vsi->alloc_queue_pairs; + if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -1881,15 +1920,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, } ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } - - /* Set actual Tx/Rx queue pairs */ - vsi->num_queue_pairs = offset; - if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { - if (vsi->req_queue_pairs > 0) - vsi->num_queue_pairs = vsi->req_queue_pairs; - else if (pf->flags & I40E_FLAG_MSIX_ENABLED) - vsi->num_queue_pairs = pf->num_lan_msix; - } + /* Do not change previously set num_queue_pairs for PFs and VFs*/ + if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || + (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) + vsi->num_queue_pairs = offset; /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { @@ -2019,6 +2054,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi, hlist_for_each_entry_safe(new, h, from, hlist) { /* We can simply free the wrapper structure */ hlist_del(&new->hlist); + netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); kfree(new); } } @@ -2366,6 +2402,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) &tmp_add_list, &tmp_del_list, vlan_filters); + + hlist_for_each_entry(new, &tmp_add_list, hlist) + netdev_hw_addr_refcnt(new->f, vsi->netdev, 1); + if (retval) goto err_no_memory_locked; @@ -2498,6 +2538,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) if (new->f->state == I40E_FILTER_NEW) new->f->state = new->state; hlist_del(&new->hlist); + netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); kfree(new); } spin_unlock_bh(&vsi->mac_filter_hash_lock); @@ -2623,7 +2664,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) for (v = 0; v < pf->num_alloc_vsi; v++) { if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) { + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && + !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { int ret = i40e_sync_vsi_filters(pf->vsi[v]); if (ret) { @@ -5427,6 +5469,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, } /** + * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI + * @vsi: the VSI being reconfigured + * @vsi_offset: offset from main VF VSI + */ +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) +{ + struct i40e_vsi_context ctxt = {}; + struct i40e_pf *pf; + struct i40e_hw *hw; + int ret; + + if (!vsi) + return I40E_ERR_PARAM; + pf = vsi->back; + hw = &pf->hw; + + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + ctxt.info = vsi->info; + + i40e_vsi_setup_queue_map(vsi, &ctxt, 
vsi->tc_config.enabled_tc, + false); + if (vsi->reconfig_rss) { + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + ret = i40e_vsi_config_rss(vsi); + if (ret) { + dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); + return ret; + } + vsi->reconfig_rss = false; + } + + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + /* update the local VSI info with updated queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + + return ret; +} + +/** * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map * @vsi: VSI to be configured * @enabled_tc: TC bitmap @@ -5717,24 +5811,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) } /** - * i40e_is_any_channel - channel exist or not - * @vsi: ptr to VSI to which channels are associated with - * - * Returns true or false if channel(s) exist for associated VSI or not - **/ -static bool i40e_is_any_channel(struct i40e_vsi *vsi) -{ - struct i40e_channel *ch, *ch_tmp; - - list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { - if (ch->initialized) - return true; - } - - return false; -} - -/** * i40e_get_max_queues_for_channel * @vsi: ptr to VSI to which channels are associated with * @@ -6240,26 +6316,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, /* By default we are in VEPA mode, if this is the first VF/VMDq * VSI to be added switch to VEB mode. */ - if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || - (!i40e_is_any_channel(vsi))) { - if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { - dev_dbg(&pf->pdev->dev, - "Failed to create channel. Override queues (%u) not power of 2\n", - vsi->tc_config.tc_info[0].qcount); - return -EINVAL; - } - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - if (vsi->type == I40E_VSI_MAIN) { - if (pf->flags & I40E_FLAG_TC_MQPRIO) - i40e_do_reset(pf, I40E_PF_RESET_FLAG, - true); - else - i40e_do_reset_safe(pf, - I40E_PF_RESET_FLAG); - } + if (vsi->type == I40E_VSI_MAIN) { + if (pf->flags & I40E_FLAG_TC_MQPRIO) + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); + else + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } /* now onwards for main VSI, number of queues will be value * of TC0's queue count @@ -7912,12 +7977,20 @@ config_tc: vsi->seid); need_reset = true; goto exit; - } else { - dev_info(&vsi->back->pdev->dev, - "Setup channel (id:%u) utilizing num_queues %d\n", - vsi->seid, vsi->tc_config.tc_info[0].qcount); + } else if (enabled_tc && + (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { + netdev_info(netdev, + "Failed to create channel. 
Override queues (%u) not power of 2\n", + vsi->tc_config.tc_info[0].qcount); + ret = -EINVAL; + need_reset = true; + goto exit; } + dev_info(&vsi->back->pdev->dev, + "Setup channel (id:%u) utilizing num_queues %d\n", + vsi->seid, vsi->tc_config.tc_info[0].qcount); + if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; @@ -8482,9 +8555,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, err = i40e_add_del_cloud_filter(vsi, filter, true); if (err) { - dev_err(&pf->pdev->dev, - "Failed to add cloud filter, err %s\n", - i40e_stat_str(&pf->hw, err)); + dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n", + err); goto err; } @@ -8669,6 +8741,27 @@ int i40e_open(struct net_device *netdev) } /** + * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues + * @vsi: vsi structure + * + * This updates netdev's number of tx/rx queues + * + * Returns status of setting tx/rx queues + **/ +static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi) +{ + int ret; + + ret = netif_set_real_num_rx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (ret) + return ret; + + return netif_set_real_num_tx_queues(vsi->netdev, + vsi->num_queue_pairs); +} + +/** * i40e_vsi_open - * @vsi: the VSI to open * @@ -8704,13 +8797,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi) goto err_setup_rx; /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(vsi->netdev, - vsi->num_queue_pairs); - if (err) - goto err_set_queues; - - err = netif_set_real_num_rx_queues(vsi->netdev, - vsi->num_queue_pairs); + err = i40e_netif_set_realnum_tx_rx_queues(vsi); if (err) goto err_set_queues; @@ -13771,7 +13858,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } - + set_bit(__I40E_VSI_RELEASING, vsi->state); uplink_seid = vsi->uplink_seid; if (vsi->type != I40E_VSI_SRIOV) { if (vsi->netdev_registered) { @@ -14103,6 +14190,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ret = i40e_config_netdev(vsi); if (ret) goto err_netdev; + ret = i40e_netif_set_realnum_tx_rx_queues(vsi); + if (ret) + goto err_netdev; ret = register_netdev(vsi->netdev); if (ret) goto err_netdev; @@ -15403,8 +15493,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) - dev_info(&pdev->dev, - "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. 
Please install the most recent version of the network driver.\n", + dev_dbg(&pdev->dev, + "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, I40E_FW_API_VERSION_MAJOR, diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index aaea297640e0..9241b6005ad3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -22,11 +22,15 @@ void i40e_adminq_init_ring_data(struct i40e_hw *hw); i40e_status i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *events_pending); -i40e_status i40e_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); +i40e_status +i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +i40e_status +i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ u16 buff_size, + struct i40e_asq_cmd_details *cmd_details, + bool is_atomic_context); /* debug function for adminq */ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 09b1d5aed1c9..61e5789d78db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1205,10 +1205,6 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work); - /* Reserved for future extensions. */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: pf->ptp_tx = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index 77be0702d07c..db3714a65dc7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -18,7 +18,7 @@ enum i40e_status_code { I40E_ERR_ADAPTER_STOPPED = -9, I40E_ERR_INVALID_MAC_ADDR = -10, I40E_ERR_DEVICE_NOT_SUPPORTED = -11, - I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_PRIMARY_REQUESTS_PENDING = -12, I40E_ERR_INVALID_LINK_SETTINGS = -13, I40E_ERR_AUTONEG_NOT_COMPLETE = -14, I40E_ERR_RESET_FAILED = -15, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 10a83e5385c7..66cc79500c10 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2204,7 +2204,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(xdp->data_hard_start, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; @@ -2322,7 +2322,7 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) result = I40E_XDP_REDIR; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 472f56b360b8..b785d09c19f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 
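[Editor's note: the two bpf_warn_invalid_xdp_action() hunks above (i40e_txrx.c and, later, i40e_xsk.c) reflect the API change that added net_device and bpf_prog context to the warning. A minimal sketch of the common XDP verdict-handling shape those hunks follow; example_run_xdp() is a hypothetical helper for illustration, not code from this patch:]

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

/* Editor's sketch, not driver code: run an XDP program and handle its
 * verdict.  Unknown verdicts warn -- with the new three-argument
 * signature carrying device and program context -- and then fall
 * through to the abort/drop handling. */
static u32 example_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			   struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_REDIRECT:
		break;
	default:
		/* unknown verdict: warn with device/program context */
		bpf_warn_invalid_xdp_action(dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
	case XDP_DROP:
		/* the caller drops or recycles the buffer */
		break;
	}
	return act;
}

[The same switch shape appears in both i40e hunks above; only the warning call site changed.]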
@@ -183,17 +183,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) /***********************misc routines*****************************/ /** - * i40e_vc_disable_vf + * i40e_vc_reset_vf * @vf: pointer to the VF info - * - * Disable the VF through a SW reset. + * @notify_vf: notify vf about reset or not + * Reset VF handler. **/ -static inline void i40e_vc_disable_vf(struct i40e_vf *vf) +static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf) { struct i40e_pf *pf = vf->pf; int i; - i40e_vc_notify_vf_reset(vf); + if (notify_vf) + i40e_vc_notify_vf_reset(vf); /* We want to ensure that an actual reset occurs initiated after this * function was called. However, we do not want to wait forever, so @@ -211,9 +212,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) usleep_range(10000, 20000); } - dev_warn(&vf->pf->pdev->dev, - "Failed to initiate reset for VF %d after 200 milliseconds\n", - vf->vf_id); + if (notify_vf) + dev_warn(&vf->pf->pdev->dev, + "Failed to initiate reset for VF %d after 200 milliseconds\n", + vf->vf_id); + else + dev_dbg(&vf->pf->pdev->dev, + "Failed to initiate reset for VF %d after 200 milliseconds\n", + vf->vf_id); } /** @@ -674,14 +680,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct virtchnl_rxq_info *info) { + u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; struct i40e_hw *hw = &pf->hw; struct i40e_hmc_obj_rxq rx_ctx; - u16 pf_queue_id; int ret = 0; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); - /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); @@ -719,6 +724,10 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, } rx_ctx.rxmax = info->max_pkt_size; + /* if port VLAN is configured increase the max packet size */ + if (vsi->info.pvid) + rx_ctx.rxmax += VLAN_HLEN; + /* enable 32bytes desc always */ rx_ctx.dsize = 1; @@ -1868,17 +1877,19 @@ sriov_configure_out: /***********************virtual channel routines******************/ /** - * i40e_vc_send_msg_to_vf + * i40e_vc_send_msg_to_vf_ex * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length + * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) +static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen, + bool is_quiet) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1894,7 +1905,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; /* single place to detect unsuccessful return values */ - if (v_retval) { + if (v_retval && !is_quiet) { vf->num_invalid_msgs++; dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, v_opcode, v_retval); @@ -1925,6 +1936,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, } /** + * i40e_vc_send_msg_to_vf + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send msg to VF + **/ +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + return 
i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, + msg, msglen, false); +} + +/** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info * @opcode: operation code @@ -1940,6 +1968,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, } /** + * i40e_sync_vf_state + * @vf: pointer to the VF info + * @state: VF state + * + * Called from a VF message to synchronize the service with a potential + * VF reset state + **/ +static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state) +{ + int i; + + /* When handling some messages, it needs VF state to be set. + * It is possible that this flag is cleared during VF reset, + * so there is a need to wait until the end of the reset to + * handle the request message correctly. + */ + for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) { + if (test_bit(state, &vf->vf_states)) + return true; + usleep_range(10000, 20000); + } + + return test_bit(state, &vf->vf_states); +} + +/** * i40e_vc_get_version_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1999,7 +2053,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) size_t len = 0; int ret; - if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -2106,20 +2160,6 @@ err: } /** - * i40e_vc_reset_vf_msg - * @vf: pointer to the VF info - * - * called from the VF to reset itself, - * unlike other virtchnl messages, PF driver - * doesn't send the response back to the VF - **/ -static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) -{ - if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) - i40e_reset_vf(vf, false); -} - -/** * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2136,7 +2176,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg) bool allmulti = false; bool alluni = false; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -2217,13 +2257,14 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; - struct i40e_pf *pf = vf->pf; u16 vsi_id, vsi_queue_id = 0; - u16 num_qps_all = 0; + struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; int i, j = 0, idx = 0; + struct i40e_vsi *vsi; + u16 num_qps_all = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2310,9 +2351,15 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; } else { - for (i = 0; i < vf->num_tc; i++) - pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs = - vf->ch[i].num_qps; + for (i = 0; i < vf->num_tc; i++) { + vsi = pf->vsi[vf->ch[i].vsi_idx]; + vsi->num_queue_pairs = vf->ch[i].num_qps; + + if (i40e_update_adq_vsi_queues(vsi, i)) { + aq_ret = I40E_ERR_CONFIG; + goto error_param; + } + } } error_param: @@ -2366,7 +2413,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2538,7 +2585,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = 
vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2588,7 +2635,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) u8 cur_pairs = vf->num_queue_pairs; struct i40e_pf *pf = vf->pf; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) return -EINVAL; if (req_pairs > I40E_MAX_VF_QUEUES) { @@ -2607,8 +2654,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) } else { /* successful request */ vf->num_req_queues = req_pairs; - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); return 0; } @@ -2634,7 +2680,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) memset(&stats, 0, sizeof(struct i40e_eth_stats)); - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } @@ -2658,16 +2704,26 @@ error_param: (u8 *)&stats, sizeof(stats)); } +#define I40E_MAX_MACVLAN_PER_HW 3072 +#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ + (num_ports)) /* If the VF is not trusted restrict the number of MAC/VLAN it can program * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast */ #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) #define I40E_VC_MAX_VLAN_PER_VF 16 +#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \ +({ typeof(vf_num) vf_num_ = (vf_num); \ + typeof(num_ports) num_ports_ = (num_ports); \ + ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \ + I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \ + I40E_VC_MAX_MAC_ADDR_PER_VF; }) /** * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl + * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: @@ -2682,13 +2738,16 @@ error_param: * addresses might not be accurate. **/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al) + struct virtchnl_ether_addr_list *al, + bool *is_quiet) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; + struct i40e_hw *hw = &pf->hw; int mac2add_cnt = 0; int i; + *is_quiet = false; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; @@ -2712,6 +2771,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + *is_quiet = true; return -EPERM; } @@ -2725,12 +2785,26 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * number of addresses. Check to make sure that the additions do not * push us over the limit. 
*/ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && - (i40e_count_filters(vsi) + mac2add_cnt) > + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + if ((i40e_count_filters(vsi) + mac2add_cnt) > I40E_VC_MAX_MAC_ADDR_PER_VF) { - dev_err(&pf->pdev->dev, - "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); - return -EPERM; + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); + return -EPERM; + } + /* If this VF is trusted, it can use more resources than untrusted. + * However to ensure that every trusted VF has appropriate number of + * resources, divide whole pool of resources per port and then across + * all VFs. + */ + } else { + if ((i40e_count_filters(vsi) + mac2add_cnt) > + I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, + hw->num_ports)) { + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses, trusted VF exhausted it's resources\n"); + return -EPERM; + } } return 0; } @@ -2748,10 +2822,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; + bool is_quiet = false; i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2764,7 +2839,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock); - ret = i40e_check_vf_permission(vf, al); + ret = i40e_check_vf_permission(vf, al, &is_quiet); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2802,8 +2877,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret); + return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0, is_quiet); } /** @@ -2823,7 +2898,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) i40e_status ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -2967,7 +3042,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -3087,9 +3162,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg) struct i40e_vsi *vsi = NULL; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) || - (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { + vrk->key_len != I40E_HKEY_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3118,9 +3193,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u16 i; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) || !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) || - (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { + vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { aq_ret = I40E_ERR_PARAM; goto err; } 
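[Editor's note: the I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF() definition added earlier relies on the GNU C statement-expression idiom. A standalone sketch of why the ({ ... }) + typeof pattern is used; EXAMPLE_MAX() is illustrative only and not part of the patch:]

/* Editor's sketch: a statement expression with typeof() locals
 * evaluates each macro argument exactly once and fixes its type. */
#define EXAMPLE_MAX(x, y)			\
({						\
	typeof(x) x_ = (x);			\
	typeof(y) y_ = (y);			\
	x_ > y_ ? x_ : y_;			\
})

/* A naive ((x) > (y) ? (x) : (y)) would evaluate one argument twice,
 * which matters for arguments with side effects, e.g.
 * EXAMPLE_MAX(i++, j).  The single-evaluation locals are why the
 * I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF() macro takes the same shape. */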
@@ -3153,7 +3228,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int len = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3189,7 +3264,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg) struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3214,7 +3289,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3240,7 +3315,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; struct i40e_vsi *vsi; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3467,7 +3542,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3598,7 +3673,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; int i, ret; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err_out; } @@ -3707,7 +3782,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) i40e_status aq_ret = 0; u64 speed = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3796,15 +3871,9 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg) /* set this flag only after making sure all inputs are sane */ vf->adq_enabled = true; - /* num_req_queues is set when user changes number of queues via ethtool - * and this causes issue for default VSI(which depends on this variable) - * when ADq is enabled, hence reset it. 
- */ - vf->num_req_queues = 0; /* reset the VF in order to allocate resources */ - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); return I40E_SUCCESS; @@ -3824,7 +3893,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) { aq_ret = I40E_ERR_PARAM; goto err; } @@ -3844,8 +3913,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg) } /* reset the VF in order to allocate resources */ - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_reset_vf(vf, true); return I40E_SUCCESS; @@ -3907,7 +3975,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - i40e_vc_reset_vf_msg(vf); + i40e_vc_reset_vf(vf, false); ret = 0; break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: @@ -4161,7 +4229,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Force the VF interface down so it has to bring up with new MAC * address */ - i40e_vc_disable_vf(vf); + i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: @@ -4170,34 +4238,6 @@ error_param: } /** - * i40e_vsi_has_vlans - True if VSI has configured VLANs - * @vsi: pointer to the vsi - * - * Check if a VSI has configured any VLANs. False if we have a port VLAN or if - * we have no configured VLANs. Do not call while holding the - * mac_filter_hash_lock. - */ -static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) -{ - bool have_vlans; - - /* If we have a port VLAN, then the VSI cannot have any VLANs - * configured, as all MAC/VLAN filters will be assigned to the PVID. - */ - if (vsi->info.pvid) - return false; - - /* Since we don't have a PVID, we know that if the device is in VLAN - * mode it must be because of a VLAN filter configured on this VSI. - */ - spin_lock_bh(&vsi->mac_filter_hash_lock); - have_vlans = i40e_is_vsi_in_vlan(vsi); - spin_unlock_bh(&vsi->mac_filter_hash_lock); - - return have_vlans; -} - -/** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure * @vf_id: VF identifier @@ -4253,19 +4293,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, /* duplicate request, so just return success */ goto error_pvid; - if (i40e_vsi_has_vlans(vsi)) { - dev_err(&pf->pdev->dev, - "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", - vf_id); - /* Administrator Error - knock the VF offline until he does - * the right thing by reconfiguring his network correctly - * and then reloading the VF driver. - */ - i40e_vc_disable_vf(vf); - /* During reset the VF got a new VSI, so refresh the pointer. */ - vsi = pf->vsi[vf->lan_vsi_idx]; - } - + i40e_vc_reset_vf(vf, true); + /* During reset the VF got a new VSI, so refresh a pointer. */ + vsi = pf->vsi[vf->lan_vsi_idx]; /* Locked once because multiple functions below iterate list */ spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -4641,7 +4671,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) goto out; vf->trusted = setting; - i40e_vc_disable_vf(vf); + i40e_vc_reset_vf(vf, true); dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, setting ? 
"" : "un"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 091e32c1bb46..49575a640a84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -18,6 +18,8 @@ #define I40E_MAX_VF_PROMISC_FLAGS 3 +#define I40E_VF_STATE_WAIT_COUNT 20 + /* Various queue ctrls */ enum i40e_queue_ctrl { I40E_QUEUE_CTRL_UNKNOWN = 0, diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index ea06e957393e..945b1bb9c6f4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -176,7 +176,7 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) goto out_failure; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index ea88f4597a07..bb962987f300 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -22,7 +22,6 @@ struct i40e_vsi; struct xsk_buff_pool; -struct zero_copy_allocator; int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index e6e7c1da47fb..59806d1f7e79 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -39,6 +39,7 @@ #include "iavf_txrx.h" #include "iavf_fdir.h" #include "iavf_adv_rss.h" +#include <linux/bitmap.h> #define DEFAULT_DEBUG_LEVEL_SHIFT 3 #define PFX "iavf: " @@ -54,7 +55,8 @@ enum iavf_vsi_state_t { struct iavf_vsi { struct iavf_adapter *back; struct net_device *netdev; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)]; u16 seid; u16 id; DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); @@ -136,14 +138,24 @@ struct iavf_q_vector { struct iavf_mac_filter { struct list_head list; u8 macaddr[ETH_ALEN]; - bool is_new_mac; /* filter is new, wait for PF decision */ - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ + struct { + u8 is_new_mac:1; /* filter is new, wait for PF decision */ + u8 remove:1; /* filter needs to be removed */ + u8 add:1; /* filter needs to be added */ + u8 is_primary:1; /* filter is a default VF MAC */ + u8 padding:4; + }; +}; + +#define IAVF_VLAN(vid, tpid) ((struct iavf_vlan){ vid, tpid }) +struct iavf_vlan { + u16 vid; + u16 tpid; }; struct iavf_vlan_filter { struct list_head list; - u16 vlan; + struct iavf_vlan vlan; bool remove; /* filter needs to be removed */ bool add; /* filter needs to be added */ }; @@ -176,6 +188,8 @@ enum iavf_state_t { __IAVF_REMOVE, /* driver is being unloaded */ __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS, + __IAVF_INIT_CONFIG_ADAPTER, __IAVF_INIT_SW, /* got resources, setting up structs */ __IAVF_INIT_FAILED, /* init failed, restarting procedure */ __IAVF_RESETTING, /* in reset */ @@ -273,37 +287,47 @@ struct iavf_adapter { /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ - u32 
aq_required; -#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0) -#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1) -#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) -#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) -#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) -#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) -#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) -#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7) -#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ -#define IAVF_FLAG_AQ_GET_CONFIG BIT(10) + u64 aq_required; +#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0) +#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT_ULL(1) +#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2) +#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3) +#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4) +#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5) +#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6) +#define IAVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7) +#define IAVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8) +#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */ +#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10) /* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ -#define IAVF_FLAG_AQ_GET_HENA BIT(11) -#define IAVF_FLAG_AQ_SET_HENA BIT(12) -#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13) -#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14) -#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15) -#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16) -#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) -#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) -#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19) -#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20) -#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21) -#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22) -#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23) -#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24) -#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25) -#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26) -#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27) -#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28) +#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11) +#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12) +#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) +#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) +#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) +#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) +#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) +#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) +#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19) +#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20) +#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21) +#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT_ULL(22) +#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT_ULL(23) +#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT_ULL(24) +#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT_ULL(25) +#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT_ULL(26) +#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT_ULL(27) +#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT_ULL(28) +#define IAVF_FLAG_AQ_REQUEST_STATS BIT_ULL(29) +#define IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS BIT_ULL(30) +#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING BIT_ULL(31) +#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING BIT_ULL(32) +#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING BIT_ULL(33) +#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING BIT_ULL(34) +#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION BIT_ULL(35) +#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36) +#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37) +#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38) /* OS defined structs */ struct net_device *netdev; @@ -343,6 +367,14 @@ struct 
iavf_adapter { VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_VLAN) +#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ + VIRTCHNL_VF_OFFLOAD_VLAN_V2) +#define VLAN_V2_FILTERING_ALLOWED(_a) \ + (VLAN_V2_ALLOWED((_a)) && \ + ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \ + (_a)->vlan_v2_caps.filtering.filtering_support.inner)) +#define VLAN_FILTERING_ALLOWED(_a) \ + (VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a))) #define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_CAP_ADV_LINK_SPEED) #define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \ @@ -354,6 +386,7 @@ struct iavf_adapter { struct virtchnl_version_info pf_version; #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ ((_a)->pf_version.minor == 1)) + struct virtchnl_vlan_caps vlan_v2_caps; u16 msg_enable; struct iavf_eth_stats current_stats; struct iavf_vsi vsi; @@ -442,7 +475,9 @@ static inline void iavf_change_state(struct iavf_adapter *adapter, int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); +int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter); void iavf_schedule_reset(struct iavf_adapter *adapter); +void iavf_schedule_request_stats(struct iavf_adapter *adapter); void iavf_reset(struct iavf_adapter *adapter); void iavf_set_ethtool_ops(struct net_device *netdev); void iavf_update_stats(struct iavf_adapter *adapter); @@ -459,6 +494,9 @@ int iavf_send_api_ver(struct iavf_adapter *adapter); int iavf_verify_api_ver(struct iavf_adapter *adapter); int iavf_send_vf_config_msg(struct iavf_adapter *adapter); int iavf_get_vf_config(struct iavf_adapter *adapter); +int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter); +int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter); +void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter); void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); void iavf_configure_queues(struct iavf_adapter *adapter); void iavf_deconfigure_queues(struct iavf_adapter *adapter); @@ -494,10 +532,19 @@ void iavf_enable_channels(struct iavf_adapter *adapter); void iavf_disable_channels(struct iavf_adapter *adapter); void iavf_add_cloud_filter(struct iavf_adapter *adapter); void iavf_del_cloud_filter(struct iavf_adapter *adapter); +void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid); +void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid); +void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid); +void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid); +void +iavf_set_vlan_offload_features(struct iavf_adapter *adapter, + netdev_features_t prev_features, + netdev_features_t features); void iavf_add_fdir_filter(struct iavf_adapter *adapter); void iavf_del_fdir_filter(struct iavf_adapter *adapter); void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter); void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, const u8 *macaddr); +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs); #endif /* _IAVF_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c index 9fa3fa99b4c2..cd4e6a22d0f9 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c @@ -551,15 +551,13 @@ init_adminq_exit: **/ enum iavf_status 
iavf_shutdown_adminq(struct iavf_hw *hw) { - enum iavf_status ret_code = 0; - if (iavf_check_asq_alive(hw)) iavf_aq_queue_shutdown(hw, true); iavf_shutdown_asq(hw); iavf_shutdown_arq(hw); - return ret_code; + return 0; } /** diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 5a359a0a20ec..3bb56714beb0 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -331,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev, **/ static int iavf_get_sset_count(struct net_device *netdev, int sset) { + /* Report the maximum number queues, even if not every queue is + * currently configured. Since allocation of queues is in pairs, + * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set + * at device creation and never changes. + */ + if (sset == ETH_SS_STATS) return IAVF_STATS_LEN + - (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES); + (IAVF_QUEUE_STATS_LEN * 2 * + netdev->real_num_tx_queues); else if (sset == ETH_SS_PRIV_FLAGS) return IAVF_PRIV_FLAGS_STR_LEN; else @@ -354,20 +361,24 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); unsigned int i; + /* Explicitly request stats refresh */ + iavf_schedule_request_stats(adapter); + iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); rcu_read_lock(); - for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) { + /* As num_active_queues describe both tx and rx queues, we can use + * it to iterate over rings' stats. + */ + for (i = 0; i < adapter->num_active_queues; i++) { struct iavf_ring *ring; - /* Avoid accessing un-allocated queues */ - ring = (i < adapter->num_active_queues ? - &adapter->tx_rings[i] : NULL); + /* Tx rings stats */ + ring = &adapter->tx_rings[i]; iavf_add_queue_stats(&data, ring); - /* Avoid accessing un-allocated queues */ - ring = (i < adapter->num_active_queues ? - &adapter->rx_rings[i] : NULL); + /* Rx rings stats */ + ring = &adapter->rx_rings[i]; iavf_add_queue_stats(&data, ring); } rcu_read_unlock(); @@ -404,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) iavf_add_stat_strings(&data, iavf_gstrings_stats); - /* Queues are always allocated in pairs, so we just use num_tx_queues - * for both Tx and Rx queues. + /* Queues are always allocated in pairs, so we just use + * real_num_tx_queues for both Tx and Rx queues. */ - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, "tx", i); iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, @@ -580,12 +591,16 @@ static void iavf_get_drvinfo(struct net_device *netdev, * iavf_get_ringparam - Get ring parameters * @netdev: network interface device structure * @ring: ethtool ringparam structure + * @kernel_ring: ethtool extenal ringparam structure + * @extack: netlink extended ACK report struct * * Returns current ring parameters. TX and RX rings are reported separately, * but the number of rings is not reported. 
**/ static void iavf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); @@ -599,12 +614,16 @@ static void iavf_get_ringparam(struct net_device *netdev, * iavf_set_ringparam - Set ring parameters * @netdev: network interface device structure * @ring: ethtool ringparam structure + * @kernel_ring: ethtool external ringparam structure + * @extack: netlink extended ACK report struct * * Sets ring parameters. TX and RX rings are controlled separately, but the * number of rings is not specified, so all rings get the same settings. **/ static int iavf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct iavf_adapter *adapter = netdev_priv(netdev); u32 new_rx_count, new_tx_count; @@ -612,23 +631,44 @@ static int iavf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_tx_count = clamp_t(u32, ring->tx_pending, - IAVF_MIN_TXD, - IAVF_MAX_TXD); - new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (ring->tx_pending > IAVF_MAX_TXD || + ring->tx_pending < IAVF_MIN_TXD || + ring->rx_pending > IAVF_MAX_RXD || + ring->rx_pending < IAVF_MIN_RXD) { + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", + ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD, + IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_tx_count != ring->tx_pending) + netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", + new_tx_count); - new_rx_count = clamp_t(u32, ring->rx_pending, - IAVF_MIN_RXD, - IAVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); + if (new_rx_count != ring->rx_pending) + netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", + new_rx_count); /* if nothing to do return success */ if ((new_tx_count == adapter->tx_desc_count) && - (new_rx_count == adapter->rx_desc_count)) + (new_rx_count == adapter->rx_desc_count)) { + netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; + } - adapter->tx_desc_count = new_tx_count; - adapter->rx_desc_count = new_rx_count; + if (new_tx_count != adapter->tx_desc_count) { + netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n", + adapter->tx_desc_count, new_tx_count); + adapter->tx_desc_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_desc_count) { + netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n", + adapter->rx_desc_count, new_rx_count); + adapter->rx_desc_count = new_rx_count; + } if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; @@ -723,12 +763,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Change the ITR settings for a specific queue. 
**/ -static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, - struct ethtool_coalesce *ec, int queue) +static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, + struct ethtool_coalesce *ec, int queue) { struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; struct iavf_q_vector *q_vector; + u16 itr_setting; + + itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->rx_coalesce_usecs != itr_setting && + ec->use_adaptive_rx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); + return -EINVAL; + } + + itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; + + if (ec->tx_coalesce_usecs != itr_setting && + ec->use_adaptive_tx_coalesce) { + netif_info(adapter, drv, adapter->netdev, + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + return -EINVAL; + } rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); @@ -751,6 +810,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter, * the Tx and Rx ITR values based on the values we have entered * into the q_vector, no need to write the values now. */ + return 0; } /** @@ -792,9 +852,11 @@ static int __iavf_set_coalesce(struct net_device *netdev, */ if (queue < 0) { for (i = 0; i < adapter->num_active_queues; i++) - iavf_set_itr_per_queue(adapter, ec, i); + if (iavf_set_itr_per_queue(adapter, ec, i)) + return -EINVAL; } else if (queue < adapter->num_active_queues) { - iavf_set_itr_per_queue(adapter, ec, queue); + if (iavf_set_itr_per_queue(adapter, ec, queue)) + return -EINVAL; } else { netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", adapter->num_active_queues - 1); @@ -1776,6 +1838,7 @@ static int iavf_set_channels(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); u32 num_req = ch->combined_count; + int i; if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && adapter->num_tc) { @@ -1786,7 +1849,7 @@ static int iavf_set_channels(struct net_device *netdev, /* All of these should have already been checked by ethtool before this * even gets to us, but just to be sure. 
*/ - if (num_req > adapter->vsi_res->num_queue_pairs) + if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs) return -EINVAL; if (num_req == adapter->num_active_queues) @@ -1798,6 +1861,20 @@ adapter->num_req_queues = num_req; adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; iavf_schedule_reset(adapter); + + /* wait until the reset is complete */ + for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) { + msleep(IAVF_RESET_WAIT_MS); + if (adapter->flags & IAVF_FLAG_RESET_PENDING) + continue; + break; + } + if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->num_active_queues = num_req; + return -EOPNOTSUPP; + } + return 0; } @@ -1844,14 +1921,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - if (!indir) - return 0; - - memcpy(key, adapter->rss_key, adapter->rss_key_size); + if (key) + memcpy(key, adapter->rss_key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - indir[i] = (u32)adapter->rss_lut[i]; + if (indir) + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; return 0; } @@ -1863,7 +1939,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @key: hash key * @hfunc: hash function to use * - * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. **/ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, @@ -1872,19 +1948,21 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir, struct iavf_adapter *adapter = netdev_priv(netdev); u16 i; - /* We do not allow change in unsupported parameters */ - if (key || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + /* Only support toeplitz hash function */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; + + if (!key && !indir) return 0; if (key) memcpy(adapter->rss_key, key, adapter->rss_key_size); - /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < adapter->rss_lut_size; i++) - adapter->rss_lut[i] = (u8)(indir[i]); + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); + } return iavf_config_rss(adapter); } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 847d67e32a54..95116ef2f0ae 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -147,7 +147,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, * * Returns 0 on success, negative on failure **/ -static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) { unsigned int wait, delay = 10; @@ -175,6 +175,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) } /** + * iavf_schedule_request_stats - Set the flags and schedule statistics request + * @adapter: board private structure + * + * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly + * request and refresh ethtool stats + **/ +void 
iavf_schedule_request_stats(struct iavf_adapter *adapter) +{ + adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); +} + +/** * iavf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: queue number that is timing out @@ -450,14 +463,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename) if (q_vector->tx.ring && q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-TxRx-%d", basename, rx_int_idx++); + "iavf-%s-TxRx-%u", basename, rx_int_idx++); tx_int_idx++; } else if (q_vector->rx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-rx-%d", basename, rx_int_idx++); + "iavf-%s-rx-%u", basename, rx_int_idx++); } else if (q_vector->tx.ring) { snprintf(q_vector->name, sizeof(q_vector->name), - "iavf-%s-tx-%d", basename, tx_int_idx++); + "iavf-%s-tx-%u", basename, tx_int_idx++); } else { /* skip this unused q_vector */ continue; @@ -633,14 +646,17 @@ static void iavf_configure_rx(struct iavf_adapter *adapter) * mac_vlan_list_lock. **/ static struct -iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, + struct iavf_vlan vlan) { struct iavf_vlan_filter *f; list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (vlan == f->vlan) + if (f->vlan.vid == vlan.vid && + f->vlan.tpid == vlan.tpid) return f; } + return NULL; } @@ -652,7 +668,8 @@ iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan) * Returns ptr to the filter object or NULL when no memory available. **/ static struct -iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan) +iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, + struct iavf_vlan vlan) { struct iavf_vlan_filter *f = NULL; @@ -681,7 +698,7 @@ clearout: * @adapter: board private structure * @vlan: VLAN tag **/ -static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) +static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) { struct iavf_vlan_filter *f; @@ -697,6 +714,68 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan) } /** + * iavf_restore_filters + * @adapter: board private structure + * + * Restore existing non MAC filters when VF netdev comes back up + **/ +static void iavf_restore_filters(struct iavf_adapter *adapter) +{ + u16 vid; + + /* re-add all VLAN filters */ + for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID) + iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q)); + + for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID) + iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD)); +} + +/** + * iavf_get_num_vlans_added - get number of VLANs added + * @adapter: board private structure + */ +static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) +{ + return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) + + bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID); +} + +/** + * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF + * @adapter: board private structure + * + * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN, + * do not impose a limit as that maintains current behavior and for + * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF. 
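iavf_restore_filters() above re-derives the VLAN filter list from two 4096-bit bitmaps. A rough stand-alone illustration of that walk (bitmap helpers are hand-rolled here; the driver uses for_each_set_bit() from <linux/bitmap.h>):

#include <stdio.h>

#define VLAN_N_VID 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long active_cvlans[VLAN_N_VID / (8 * sizeof(unsigned long))];

static void set_vid(unsigned int vid)
{
	active_cvlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

int main(void)
{
	set_vid(100);
	set_vid(4000);

	/* user-space equivalent of for_each_set_bit(vid, bitmap, VLAN_N_VID) */
	for (unsigned int vid = 0; vid < VLAN_N_VID; vid++)
		if (active_cvlans[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG)))
			printf("re-add 802.1Q filter for VID %u\n", vid);
	return 0;
}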
+ **/ +static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter) +{ + /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has + * never been a limit on the VF driver side + */ + if (VLAN_ALLOWED(adapter)) + return VLAN_N_VID; + else if (VLAN_V2_ALLOWED(adapter)) + return adapter->vlan_v2_caps.filtering.max_filters; + + return 0; +} + +/** + * iavf_max_vlans_added - check if maximum VLANs allowed already exist + * @adapter: board private structure + **/ +static bool iavf_max_vlans_added(struct iavf_adapter *adapter) +{ + if (iavf_get_num_vlans_added(adapter) < + iavf_get_max_vlans_allowed(adapter)) + return false; + + return true; +} + +/** * iavf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct * @proto: unused protocol data * @vid: VLAN tag @@ -707,10 +786,23 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!VLAN_ALLOWED(adapter)) + if (!VLAN_FILTERING_ALLOWED(adapter)) + return -EIO; + + if (iavf_max_vlans_added(adapter)) { + netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n", + iavf_get_max_vlans_allowed(adapter)); return -EIO; - if (iavf_add_vlan(adapter, vid) == NULL) + } + + if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)))) return -ENOMEM; + + if (proto == cpu_to_be16(ETH_P_8021Q)) + set_bit(vid, adapter->vsi.active_cvlans); + else + set_bit(vid, adapter->vsi.active_svlans); + return 0; } @@ -725,11 +817,13 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (VLAN_ALLOWED(adapter)) { - iavf_del_vlan(adapter, vid); - return 0; - } - return -EIO; + iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))); + if (proto == cpu_to_be16(ETH_P_8021Q)) + clear_bit(vid, adapter->vsi.active_cvlans); + else + clear_bit(vid, adapter->vsi.active_svlans); + + return 0; } /** @@ -1122,6 +1216,86 @@ static void iavf_free_queues(struct iavf_adapter *adapter) } /** + * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload + * @adapter: board private structure + * + * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or + * stripped in certain descriptor fields. Instead of checking the offload + * capability bits in the hot path, cache the location in the ring-specific + * flags. 
+ */ +void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct iavf_ring *tx_ring = &adapter->tx_rings[i]; + struct iavf_ring *rx_ring = &adapter->rx_rings[i]; + + /* prevent multiple L2TAG bits being set after VFR */ + tx_ring->flags &= + ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2); + rx_ring->flags &= + ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 | + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2); + + if (VLAN_ALLOWED(adapter)) { + tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_supported_caps *stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support; + + stripping_support = + &adapter->vlan_v2_caps.offloads.stripping_support; + insertion_support = + &adapter->vlan_v2_caps.offloads.insertion_support; + + if (stripping_support->outer) { + if (stripping_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + rx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (stripping_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) + rx_ring->flags |= + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; + } else if (stripping_support->inner) { + if (stripping_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + rx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (stripping_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) + rx_ring->flags |= + IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2; + } + + if (insertion_support->outer) { + if (insertion_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + tx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (insertion_support->outer & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) + tx_ring->flags |= + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; + } else if (insertion_support->inner) { + if (insertion_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + tx_ring->flags |= + IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (insertion_support->inner & + VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) + tx_ring->flags |= + IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2; + } + } + } +} + +/** * iavf_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * @@ -1182,6 +1356,8 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) adapter->num_active_queues = num_active_queues; + iavf_set_queue_vlan_tag_loc(adapter); + return 0; err_out: @@ -1554,6 +1730,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) { if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) return iavf_send_vf_config_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS) + return iavf_send_vf_offload_vlan_v2_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { iavf_disable_queues(adapter); return 0; @@ -1639,8 +1817,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC); return 0; } - - if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) && + if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) || (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) { iavf_set_promiscuous(adapter, 0); return 0; @@ -1688,10 +1865,133 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_del_adv_rss_cfg(adapter); return 0; } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) { + iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q); + return 0; + } 
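The payoff of iavf_set_queue_vlan_tag_loc() is in the data path: the negotiated tag location collapses to one cached flag bit per ring. A condensed sketch of the Rx side (flag values copied from iavf_txrx.h; the descriptor plumbing is simplified and hypothetical):

#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	(1u << 3)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	(1u << 5)

/* simplified: the real code reads l2tag1/l2tag2_2 out of the Rx descriptor */
static unsigned short rx_vlan_tag(unsigned int ring_flags,
				  unsigned short l2tag1,
				  unsigned short l2tag2_2)
{
	if (ring_flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
		return l2tag1;
	if (ring_flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
		return l2tag2_2;
	return 0;
}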
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) { + iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) { + iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) { + iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) { + iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) { + iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) { + iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q); + return 0; + } + if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) { + iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD); + return 0; + } + + if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { + iavf_request_stats(adapter); + return 0; + } + return -EAGAIN; } /** + * iavf_set_vlan_offload_features - set VLAN offload configuration + * @adapter: board private structure + * @prev_features: previous features used for comparison + * @features: updated features used for configuration + * + * Set the aq_required bit(s) based on the requested features passed in to + * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule + * the watchdog if any changes are requested to expedite the request via + * virtchnl. + **/ +void +iavf_set_vlan_offload_features(struct iavf_adapter *adapter, + netdev_features_t prev_features, + netdev_features_t features) +{ + bool enable_stripping = true, enable_insertion = true; + u16 vlan_ethertype = 0; + u64 aq_required = 0; + + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. + */ + if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) + vlan_ethertype = ETH_P_8021AD; + else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) + vlan_ethertype = ETH_P_8021Q; + else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) + vlan_ethertype = ETH_P_8021AD; + else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) + vlan_ethertype = ETH_P_8021Q; + else + vlan_ethertype = ETH_P_8021Q; + + if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) + enable_stripping = false; + if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) + enable_insertion = false; + + if (VLAN_ALLOWED(adapter)) { + /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN + * stripping via virtchnl. 
VLAN insertion can be toggled on the + * netdev, but it doesn't require a virtchnl message + */ + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; + + } else if (VLAN_V2_ALLOWED(adapter)) { + switch (vlan_ethertype) { + case ETH_P_8021Q: + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; + + if (enable_insertion) + aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; + break; + case ETH_P_8021AD: + if (enable_stripping) + aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; + + if (enable_insertion) + aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; + else + aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; + break; + } + } + + if (aq_required) { + adapter->aq_required |= aq_required; + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + } +} + +/** * iavf_startup - first step of driver startup * @adapter: board private structure * @@ -1793,6 +2093,59 @@ err: } /** + * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES + * @adapter: board private structure + */ +int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter) +{ + int i, num_req_queues = adapter->num_req_queues; + struct iavf_vsi *vsi = &adapter->vsi; + + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (num_req_queues && + num_req_queues > adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. Need a reset to see if we can't + * get back to a working state. 
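Stripped of the virtchnl plumbing, the feature decoding in iavf_set_vlan_offload_features() above reduces to choosing one ethertype plus an enable/disable pair. An illustrative sketch with made-up feature bit values (not the kernel's NETIF_F_* constants):

#include <stdbool.h>
#include <stdint.h>

/* illustrative feature bits, not the kernel's NETIF_F_* values */
#define F_CTAG_RX (1u << 0)
#define F_CTAG_TX (1u << 1)
#define F_STAG_RX (1u << 2)
#define F_STAG_TX (1u << 3)

struct vlan_offload_req {
	uint16_t ethertype;	/* 0x8100 (CTAG) or 0x88a8 (STAG) */
	bool enable_stripping;
	bool enable_insertion;
};

static struct vlan_offload_req decode(uint32_t features)
{
	struct vlan_offload_req req = {
		/* STAG bits take precedence, otherwise default to CTAG */
		.ethertype = (features & (F_STAG_RX | F_STAG_TX)) ? 0x88a8 : 0x8100,
		.enable_stripping = (features & (F_CTAG_RX | F_STAG_RX)) != 0,
		.enable_insertion = (features & (F_CTAG_TX | F_STAG_TX)) != 0,
	};
	return req;
}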
+ */ + dev_err(&adapter->pdev->dev, + "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, + adapter->vsi_res->num_queue_pairs); + adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + iavf_schedule_reset(adapter); + + return -EAGAIN; + } + adapter->num_req_queues = 0; + adapter->vsi.id = adapter->vsi_res->vsi_id; + + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; + vsi->netdev = adapter->netdev; + vsi->qs_handle = adapter->vsi_res->qset_handle; + if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + adapter->rss_key_size = adapter->vf_res->rss_key_size; + adapter->rss_lut_size = adapter->vf_res->rss_lut_size; + } else { + adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; + } + + return 0; +} + +/** * iavf_init_get_resources - third step of driver startup * @adapter: board private structure * @@ -1803,7 +2156,6 @@ err: **/ static void iavf_init_get_resources(struct iavf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct iavf_hw *hw = &adapter->hw; int err; @@ -1821,7 +2173,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) err = iavf_get_vf_config(adapter); if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) { err = iavf_send_vf_config_msg(adapter); - goto err; + goto err_alloc; } else if (err == IAVF_ERR_PARAM) { /* We only get ERR_PARAM if the device is in a very bad * state or if we've been disabled for previous bad @@ -1836,9 +2188,83 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) goto err_alloc; } - err = iavf_process_config(adapter); + err = iavf_parse_vf_resource_msg(adapter); if (err) goto err_alloc; + + err = iavf_send_vf_offload_vlan_v2_msg(adapter); + if (err == -EOPNOTSUPP) { + /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so + * go directly to finishing initialization + */ + iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); + return; + } else if (err) { + dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n", + err); + goto err_alloc; + } + + /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the + * state accordingly + */ + iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS); + return; + +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_get_offload_vlan_v2_caps - part of driver startup + * @adapter: board private structure + * + * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the + * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is + * not negotiated, then this state will never be entered. 
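The two new states slot into the watchdog-driven init sequence. A schematic reduction of the flow (state names follow the driver; the transition helper itself is hypothetical):

enum iavf_init_state {
	INIT_GET_RESOURCES,
	INIT_GET_OFFLOAD_VLAN_V2_CAPS,	/* entered only if VLAN_V2 negotiated */
	INIT_CONFIG_ADAPTER,
	INIT_FAILED,
};

/* each watchdog tick advances at most one init step */
static enum iavf_init_state next_state(enum iavf_init_state cur,
				       bool vlan_v2_negotiated, bool err)
{
	if (err)
		return INIT_FAILED;

	switch (cur) {
	case INIT_GET_RESOURCES:
		/* legacy PFs skip the extra capability negotiation step */
		return vlan_v2_negotiated ? INIT_GET_OFFLOAD_VLAN_V2_CAPS
					  : INIT_CONFIG_ADAPTER;
	case INIT_GET_OFFLOAD_VLAN_V2_CAPS:
		return INIT_CONFIG_ADAPTER;
	default:
		return cur;
	}
}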
+ **/ +static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter) +{ + int ret; + + WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS); + + memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps)); + + ret = iavf_get_vf_vlan_v2_caps(adapter); + if (ret) { + if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK) + iavf_send_vf_offload_vlan_v2_msg(adapter); + goto err; + } + + iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); + return; +err: + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_config_adapter - last part of driver startup + * @adapter: board private structure + * + * After all the supported capabilities are negotiated, then the + * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization. + */ +static void iavf_init_config_adapter(struct iavf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + + WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER); + + if (iavf_process_config(adapter)) + goto err; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED; @@ -1921,6 +2347,10 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter) else iavf_init_rss(adapter); + if (VLAN_V2_ALLOWED(adapter)) + /* request initial VLAN offload settings */ + iavf_set_vlan_offload_features(adapter, 0, netdev->features); + return; err_mem: iavf_free_rss(adapter); @@ -1928,9 +2358,6 @@ err_register: iavf_free_misc_irq(adapter); err_sw_init: iavf_reset_interrupt_capability(adapter); -err_alloc: - kfree(adapter->vf_res); - adapter->vf_res = NULL; err: iavf_change_state(adapter, __IAVF_INIT_FAILED); } @@ -1979,6 +2406,18 @@ static void iavf_watchdog_task(struct work_struct *work) queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; + case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS: + iavf_init_get_offload_vlan_v2_caps(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; + case __IAVF_INIT_CONFIG_ADAPTER: + iavf_init_config_adapter(adapter); + mutex_unlock(&adapter->crit_lock); + queue_delayed_work(iavf_wq, &adapter->watchdog_task, + msecs_to_jiffies(1)); + return; case __IAVF_INIT_FAILED: if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&adapter->pdev->dev, @@ -2012,6 +2451,7 @@ static void iavf_watchdog_task(struct work_struct *work) } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); @@ -2031,10 +2471,13 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_send_api_ver(adapter); } } else { + int ret = iavf_process_aq_command(adapter); + /* An error will be returned if no commands were * processed; use this opportunity to update stats + * if the error isn't -ENOTSUPP */ - if (iavf_process_aq_command(adapter) && + if (ret && ret != -EOPNOTSUPP && adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } @@ -2042,16 +2485,14 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_detect_recover_hung(&adapter->vsi); break; case __IAVF_REMOVE: - mutex_unlock(&adapter->crit_lock); - return; default: + mutex_unlock(&adapter->crit_lock); return; } /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { - iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required 
= 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -2123,8 +2564,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - iavf_free_queues(adapter); iavf_free_q_vectors(adapter); + iavf_free_queues(adapter); memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; @@ -2152,7 +2593,6 @@ static void iavf_reset_task(struct work_struct *work) struct net_device *netdev = adapter->netdev; struct iavf_hw *hw = &adapter->hw; struct iavf_mac_filter *f, *ftmp; - struct iavf_vlan_filter *vlf; struct iavf_cloud_filter *cf; u32 reg_val; int i = 0, err; @@ -2215,6 +2655,7 @@ } pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) { dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", @@ -2233,6 +2674,7 @@ continue_reset: (adapter->state == __IAVF_RESETTING)); if (running) { + netdev->flags &= ~IFF_UP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -2274,6 +2716,13 @@ continue_reset: } adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; + /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been + * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here, + * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until + * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have + * been successfully sent and negotiated + */ + adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; spin_lock_bh(&adapter->mac_vlan_list_lock); @@ -2292,11 +2741,6 @@ continue_reset: list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; } - /* re-add all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->add = true; - } - spin_unlock_bh(&adapter->mac_vlan_list_lock); /* check if TCs are running and re-add all cloud filters */ @@ -2310,7 +2754,6 @@ continue_reset: spin_unlock_bh(&adapter->cloud_filter_list_lock); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter); @@ -2344,7 +2787,7 @@ continue_reset: * to __IAVF_RUNNING */ iavf_up_complete(adapter); - + netdev->flags |= IFF_UP; iavf_irq_enable(adapter, true); } else { iavf_change_state(adapter, __IAVF_DOWN); @@ -2357,8 +2800,10 @@ continue_reset: reset_err: mutex_unlock(&adapter->client_lock); mutex_unlock(&adapter->crit_lock); - if (running) + if (running) { iavf_change_state(adapter, __IAVF_RUNNING); + netdev->flags |= IFF_UP; + } dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2410,7 +2855,7 @@ static void iavf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); - if (val == 0xdeadbeef) /* indicates device in reset */ + if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */ goto freedom; oldval = val; if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) { @@ -2678,8 +3123,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter, total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > IAVF_MAX_REQ_QUEUES) + if (num_qps > adapter->num_active_queues) { + dev_err(&adapter->pdev->dev, + "Cannot support requested number of queues\n"); return 
-EINVAL; + } ret = iavf_validate_tx_bandwidth(adapter, total_max_rate); return ret; @@ -2880,7 +3328,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", match.mask->dst); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2890,7 +3338,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", match.mask->src); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2925,7 +3373,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", match.mask->vlan_id); - return IAVF_ERR_CONFIG; + return -EINVAL; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); @@ -2949,7 +3397,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -2959,13 +3407,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", be32_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) { dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); @@ -2986,7 +3434,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); - return IAVF_ERR_CONFIG; + return -EINVAL; } /* src and dest IPv6 address should not be LOOPBACK @@ -2996,7 +3444,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); - return IAVF_ERR_CONFIG; + return -EINVAL; } if (!ipv6_addr_any(&match.mask->dst) || !ipv6_addr_any(&match.mask->src)) @@ -3021,7 +3469,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", be16_to_cpu(match.mask->src)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } @@ -3031,7 +3479,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", be16_to_cpu(match.mask->dst)); - return IAVF_ERR_CONFIG; + return -EINVAL; } } if (match.key->dst) { @@ -3095,8 +3543,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, return -ENOMEM; while (!mutex_trylock(&adapter->crit_lock)) { - if (--count == 0) - goto err; + if (--count == 0) { + kfree(filter); + return err; + } udelay(1); } @@ -3107,11 +3557,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, /* start out with flow type and eth type IPv4 to begin with */ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW; err = iavf_parse_cls_flower(adapter, cls_flower, filter); - if (err < 0) + if (err) goto err; err = iavf_handle_tclass(adapter, tc, filter); - if (err < 0) + if (err) goto err; /* add filter to the list */ @@ -3308,6 +3758,9 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); + /* Restore VLAN filters that were removed with IFF_DOWN */ + iavf_restore_filters(adapter); + iavf_configure(adapter); iavf_up_complete(adapter); @@ -3393,6 +3846,8 @@ static int iavf_change_mtu(struct 
net_device *netdev, int new_mtu) { struct iavf_adapter *adapter = netdev_priv(netdev); + netdev_dbg(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (CLIENT_ENABLED(adapter)) { iavf_notify_client_l2_params(&adapter->vsi); @@ -3404,6 +3859,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + /** * iavf_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @@ -3415,20 +3875,11 @@ static int iavf_set_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when adapter is not capable - * of VLAN offload - */ - if (!VLAN_ALLOWED(adapter)) { - if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) - return -EINVAL; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - if (features & NETIF_F_HW_VLAN_CTAG_RX) - adapter->aq_required |= - IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; - else - adapter->aq_required |= - IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } + /* trigger update on any VLAN feature change */ + if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^ + (features & NETIF_VLAN_OFFLOAD_FEATURES)) + iavf_set_vlan_offload_features(adapter, netdev->features, + features); return 0; } @@ -3492,6 +3943,228 @@ out_err: } /** + * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off + * @adapter: board private structure + * + * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * were negotiated, determine the VLAN features that can be toggled on and off. + **/ +static netdev_features_t +iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter) +{ + netdev_features_t hw_features = 0; + + if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) + return hw_features; + + /* Enable VLAN features if supported */ + if (VLAN_ALLOWED(adapter)) { + hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_caps *vlan_v2_caps = + &adapter->vlan_v2_caps; + struct virtchnl_vlan_supported_caps *stripping_support = + &vlan_v2_caps->offloads.stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support = + &vlan_v2_caps->offloads.insertion_support; + + if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && + stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) { + if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8) + hw_features |= NETIF_F_HW_VLAN_STAG_RX; + } else if (stripping_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED && + stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) { + if (stripping_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED && + insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) { + if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8) + hw_features |= NETIF_F_HW_VLAN_STAG_TX; + } else if (insertion_support->inner && + insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) { + if (insertion_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100) + hw_features |= 
NETIF_F_HW_VLAN_CTAG_TX; + } + } + + return hw_features; +} + +/** + * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features + * @adapter: board private structure + * + * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * were negotiated, determine the VLAN features that are enabled by default. + **/ +static netdev_features_t +iavf_get_netdev_vlan_features(struct iavf_adapter *adapter) +{ + netdev_features_t features = 0; + + if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags) + return features; + + if (VLAN_ALLOWED(adapter)) { + features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; + } else if (VLAN_V2_ALLOWED(adapter)) { + struct virtchnl_vlan_caps *vlan_v2_caps = + &adapter->vlan_v2_caps; + struct virtchnl_vlan_supported_caps *filtering_support = + &vlan_v2_caps->filtering.filtering_support; + struct virtchnl_vlan_supported_caps *stripping_support = + &vlan_v2_caps->offloads.stripping_support; + struct virtchnl_vlan_supported_caps *insertion_support = + &vlan_v2_caps->offloads.insertion_support; + u32 ethertype_init; + + /* give priority to outer stripping and don't support both outer + * and inner stripping + */ + ethertype_init = vlan_v2_caps->offloads.ethertype_init; + if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_RX; + else if (stripping_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_RX; + } else if (stripping_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (stripping_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + /* give priority to outer insertion and don't support both outer + * and inner insertion + */ + if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else if (insertion_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_TX; + } else if (insertion_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (insertion_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_TX; + } + + /* give priority to outer filtering and don't bother if both + * outer and inner filtering are enabled + */ + ethertype_init = vlan_v2_caps->filtering.ethertype_init; + if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) { + if (filtering_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (filtering_support->outer & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } else if (filtering_support->inner != + VIRTCHNL_VLAN_UNSUPPORTED) { + if (filtering_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_8100 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (filtering_support->inner & + VIRTCHNL_VLAN_ETHERTYPE_88A8 && + ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8) + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } + } + + 
return features; +} + +#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \ + (!(((requested) & (feature_bit)) && \ + !((allowed) & (feature_bit)))) + +/** + * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support + * @adapter: board private structure + * @requested_features: stack requested NETDEV features + **/ +static netdev_features_t +iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter, + netdev_features_t requested_features) +{ + netdev_features_t allowed_features; + + allowed_features = iavf_get_netdev_vlan_hw_features(adapter) | + iavf_get_netdev_vlan_features(adapter); + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_TX)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_RX)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_TX)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_TX; + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_RX)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_CTAG_FILTER)) + requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features, + allowed_features, + NETIF_F_HW_VLAN_STAG_FILTER)) + requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER; + + if ((requested_features & + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && + (requested_features & + (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) && + adapter->vlan_v2_caps.offloads.ethertype_match == + VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) { + netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); + requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX); + } + + return requested_features; +} + +/** * iavf_fix_features - fix up the netdev feature bits * @netdev: our net device * @features: desired feature bits @@ -3503,12 +4176,7 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); - if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) - features &= ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - - return features; + return iavf_fix_netdev_vlan_features(adapter, features); } static const struct net_device_ops iavf_netdev_ops = { @@ -3560,39 +4228,11 @@ static int iavf_check_reset_complete(struct iavf_hw *hw) int iavf_process_config(struct iavf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; - int i, num_req_queues = adapter->num_req_queues; + netdev_features_t hw_vlan_features, vlan_features; struct net_device *netdev = adapter->netdev; - struct iavf_vsi *vsi = &adapter->vsi; netdev_features_t hw_enc_features; netdev_features_t hw_features; - /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < vfres->num_vsis; i++) { - if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) - adapter->vsi_res = &vfres->vsi_res[i]; - } - if (!adapter->vsi_res) { - dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); - return -ENODEV; - } - - if (num_req_queues && - 
num_req_queues > adapter->vsi_res->num_queue_pairs) { - /* Problem. The PF gave us fewer queues than what we had - * negotiated in our request. Need a reset to see if we can't - * get back to a working state. - */ - dev_err(&adapter->pdev->dev, - "Requested %d queues, but PF only gave us %d.\n", - num_req_queues, - adapter->vsi_res->num_queue_pairs); - adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; - adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; - iavf_schedule_reset(adapter); - return -ENODEV; - } - adapter->num_req_queues = 0; - hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -3636,19 +4276,19 @@ int iavf_process_config(struct iavf_adapter *adapter) */ hw_features = hw_enc_features; - /* Enable VLAN features if supported */ - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) - hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX); + /* get HW VLAN features that can be toggled */ + hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter); + /* Enable cloud filter if ADQ is supported */ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) hw_features |= NETIF_F_HW_TC; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO) hw_features |= NETIF_F_GSO_UDP_L4; - netdev->hw_features |= hw_features; + netdev->hw_features |= hw_features | hw_vlan_features; + vlan_features = iavf_get_netdev_vlan_features(adapter); - netdev->features |= hw_features; + netdev->features |= hw_features | vlan_features; if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; @@ -3673,21 +4313,6 @@ int iavf_process_config(struct iavf_adapter *adapter) netdev->features &= ~NETIF_F_GSO; } - adapter->vsi.id = adapter->vsi_res->vsi_id; - - adapter->vsi.back = adapter; - adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; - vsi->netdev = adapter->netdev; - vsi->qs_handle = adapter->vsi_res->qset_handle; - if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - adapter->rss_key_size = vfres->rss_key_size; - adapter->rss_lut_size = vfres->rss_lut_size; - } else { - adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE; - adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE; - } - return 0; } @@ -3957,6 +4582,7 @@ static void iavf_remove(struct pci_dev *pdev) if (iavf_lock_timeout(&adapter->crit_lock, 5000)) dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + dev_info(&adapter->pdev->dev, "Removing device\n"); /* Shut down all the garbage mashers on the detention level */ iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 3525eab8e9f9..8cbe7ad1347c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -865,6 +865,9 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring, if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && + vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag); napi_gro_receive(&q_vector->napi, skb); } @@ -1363,7 +1366,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, net_prefetch(va); /* build an skb around the page buffer */ - skb = build_skb(va - IAVF_SKB_PAD, truesize); + skb = napi_build_skb(va - IAVF_SKB_PAD, truesize); if (unlikely(!skb)) return NULL; @@ -1468,7 
+1471,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) struct iavf_rx_buffer *rx_buffer; union iavf_rx_desc *rx_desc; unsigned int size; - u16 vlan_tag; + u16 vlan_tag = 0; u8 rx_ptype; u64 qword; @@ -1551,9 +1554,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* populate checksum, VLAN, and protocol */ iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - - vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? - le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) && + rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) + vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1); + if (rx_desc->wb.qword2.ext_status & + cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) && + rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) + vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2); iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); iavf_receive_skb(rx_ring, skb, vlan_tag); @@ -1766,7 +1773,7 @@ tx_only: if (likely(napi_complete_done(napi, work_done))) iavf_update_enable_itr(vsi, q_vector); - return min_t(int, work_done, budget - 1); + return min_t(int, work_done, budget - 1); } /** @@ -1781,46 +1788,29 @@ tx_only: * Returns an error code to indicate the frame should be dropped upon error, and * otherwise returns 0 to indicate the flags have been set properly. **/ -static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb, - struct iavf_ring *tx_ring, - u32 *flags) +static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct iavf_ring *tx_ring, u32 *flags) { - __be16 protocol = skb->protocol; u32 tx_flags = 0; - if (protocol == htons(ETH_P_8021Q) && - !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { - /* When HW VLAN acceleration is turned off by the user the - * stack sets the protocol to 8021q so that the driver - * can take any steps required to support the SW only - * VLAN handling. In our case the driver doesn't need - * to take any further steps so just set the protocol - * to the encapsulated ethertype. 
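As context for the Tx changes that follow: the VLAN TCI travels in the upper 16 bits of the 32-bit tx_flags word, and the same mask/shift pair later recovers it as the context-descriptor l2tag2 value. A minimal sketch (masks copied from iavf_txrx.h):

#include <stdint.h>

#define IAVF_TX_FLAGS_VLAN_MASK	 0xffff0000u
#define IAVF_TX_FLAGS_VLAN_SHIFT 16

static uint32_t pack_vlan_tag(uint32_t tx_flags, uint16_t tci)
{
	return tx_flags | ((uint32_t)tci << IAVF_TX_FLAGS_VLAN_SHIFT);
}

static uint16_t unpack_vlan_tag(uint32_t tx_flags)
{
	/* same extraction used to fill cd_l2tag2 for the outer tag */
	return (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> IAVF_TX_FLAGS_VLAN_SHIFT;
}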
- */ - skb->protocol = vlan_get_protocol(skb); - goto out; - } - /* if we have a HW VLAN tag being added, default to the HW one */ - if (skb_vlan_tag_present(skb)) { - tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IAVF_TX_FLAGS_HW_VLAN; - /* else if it is a SW VLAN, check the next protocol and store the tag */ - } else if (protocol == htons(ETH_P_8021Q)) { - struct vlan_hdr *vhdr, _vhdr; - - vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); - if (!vhdr) - return -EINVAL; + /* stack will only request hardware VLAN insertion offload for protocols + * that the driver supports and has enabled + */ + if (!skb_vlan_tag_present(skb)) + return; - protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT; - tx_flags |= IAVF_TX_FLAGS_SW_VLAN; + tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT; + if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) { + tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN; + } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { + tx_flags |= IAVF_TX_FLAGS_HW_VLAN; + } else { + dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n"); + return; } -out: *flags = tx_flags; - return 0; } /** @@ -2440,8 +2430,13 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, first->gso_segs = 1; /* prepare the xmit flags */ - if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) - goto out_drop; + iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags); + if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) { + cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 << + IAVF_TXD_CTX_QW1_CMD_SHIFT; + cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >> + IAVF_TX_FLAGS_VLAN_SHIFT; + } /* obtain protocol of skb */ protocol = vlan_get_protocol(skb); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index e5b9ba42dd00..2624bf6d009e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -243,19 +243,20 @@ static inline unsigned int iavf_txd_use_count(unsigned int size) #define DESC_NEEDED (MAX_SKB_FRAGS + 6) #define IAVF_MIN_DESC_PENDING 4 -#define IAVF_TX_FLAGS_HW_VLAN BIT(1) -#define IAVF_TX_FLAGS_SW_VLAN BIT(2) -#define IAVF_TX_FLAGS_TSO BIT(3) -#define IAVF_TX_FLAGS_IPV4 BIT(4) -#define IAVF_TX_FLAGS_IPV6 BIT(5) -#define IAVF_TX_FLAGS_FCCRC BIT(6) -#define IAVF_TX_FLAGS_FSO BIT(7) -#define IAVF_TX_FLAGS_FD_SB BIT(9) -#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) -#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 -#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 -#define IAVF_TX_FLAGS_VLAN_SHIFT 16 +#define IAVF_TX_FLAGS_HW_VLAN BIT(1) +#define IAVF_TX_FLAGS_SW_VLAN BIT(2) +#define IAVF_TX_FLAGS_TSO BIT(3) +#define IAVF_TX_FLAGS_IPV4 BIT(4) +#define IAVF_TX_FLAGS_IPV6 BIT(5) +#define IAVF_TX_FLAGS_FCCRC BIT(6) +#define IAVF_TX_FLAGS_FSO BIT(7) +#define IAVF_TX_FLAGS_FD_SB BIT(9) +#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10) +#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(11) +#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IAVF_TX_FLAGS_VLAN_SHIFT 16 struct iavf_tx_buffer { struct iavf_tx_desc *next_to_watch; @@ -362,6 +363,9 @@ struct iavf_ring { u16 flags; #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) #define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) +#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3) +#define 
IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4) +#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5) /* stats structs */ struct iavf_queue_stats stats; diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 8c3f0f77cb57..5ee1d118fd30 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -137,6 +137,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | + VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | VIRTCHNL_VF_OFFLOAD_ADQ | @@ -155,6 +156,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) NULL, 0); } +int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) +{ + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; + + if (!VLAN_V2_ALLOWED(adapter)) + return -EOPNOTSUPP; + + adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS; + + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, + NULL, 0); +} + /** * iavf_validate_num_queues * @adapter: adapter structure @@ -235,6 +249,45 @@ out: return err; } +int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) +{ + struct iavf_hw *hw = &adapter->hw; + struct iavf_arq_event_info event; + enum virtchnl_ops op; + enum iavf_status err; + u16 len; + + len = sizeof(struct virtchnl_vlan_caps); + event.buf_len = len; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + while (1) { + /* When the AQ is empty, iavf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + err = iavf_clean_arq_element(hw, &event, NULL); + if (err) + goto out_alloc; + op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) + break; + } + + err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low); + if (err) + goto out_alloc; + + memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len)); +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + /** * iavf_configure_queues * @adapter: adapter structure @@ -589,7 +642,6 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter) **/ void iavf_add_vlans(struct iavf_adapter *adapter) { - struct virtchnl_vlan_filter_list *vvfl; int len, i = 0, count = 0; struct iavf_vlan_filter *f; bool more = false; @@ -607,48 +659,105 @@ void iavf_add_vlans(struct iavf_adapter *adapter) if (f->add) count++; } - if (!count) { + if (!count || !VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } - adapter->current_op = VIRTCHNL_OP_ADD_VLAN; - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { + if (VLAN_ALLOWED(adapter)) { + struct virtchnl_vlan_filter_list *vvfl; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(*vvfl) + (count * sizeof(u16)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = 
(IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / + sizeof(u16); + len = sizeof(*vvfl) + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vvfl->vlan_id[i] = f->vlan.vid; + i++; + f->add = false; + if (i == count) + break; + } + } + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->add) { - vvfl->vlan_id[i] = f->vlan; - i++; - f->add = false; - if (i == count) - break; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + } else { + struct virtchnl_vlan_filter_list_v2 *vvfl_v2; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2; + + len = sizeof(*vvfl_v2) + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) / + sizeof(struct virtchnl_vlan_filter); + len = sizeof(*vvfl_v2) + + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + more = true; } - } - if (!more) - adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); + vvfl_v2 = kzalloc(len, GFP_ATOMIC); + if (!vvfl_v2) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl_v2->vport_id = adapter->vsi_res->vsi_id; + vvfl_v2->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + struct virtchnl_vlan_supported_caps *filtering_support = + &adapter->vlan_v2_caps.filtering.filtering_support; + struct virtchnl_vlan *vlan; + + /* give priority over outer if it's enabled */ + if (filtering_support->outer) + vlan = &vvfl_v2->filters[i].outer; + else + vlan = &vvfl_v2->filters[i].inner; + + vlan->tci = f->vlan.vid; + vlan->tpid = f->vlan.tpid; + + i++; + f->add = false; + if (i == count) + break; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER; - iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); - kfree(vvfl); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2, + (u8 *)vvfl_v2, len); + kfree(vvfl_v2); + } } /** @@ -659,7 +768,6 @@ void iavf_add_vlans(struct iavf_adapter *adapter) **/ void iavf_del_vlans(struct iavf_adapter *adapter) { - struct virtchnl_vlan_filter_list *vvfl; struct iavf_vlan_filter *f, *ftmp; int len, i = 0, count = 0; bool more = false; @@ -673,53 +781,123 @@ void iavf_del_vlans(struct iavf_adapter *adapter) spin_lock_bh(&adapter->mac_vlan_list_lock); - list_for_each_entry(f, &adapter->vlan_filter_list, list) { - if (f->remove) + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we don't want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) { + list_del(&f->list); + kfree(f); + } else if (f->remove) { count++; + } } - if (!count || 
!VLAN_FILTERING_ALLOWED(adapter)) { adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } - adapter->current_op = VIRTCHNL_OP_DEL_VLAN; - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); - count = (IAVF_MAX_AQ_BUF_SIZE - - sizeof(struct virtchnl_vlan_filter_list)) / - sizeof(u16); - len = sizeof(struct virtchnl_vlan_filter_list) + - (count * sizeof(u16)); - more = true; - } - vvfl = kzalloc(len, GFP_ATOMIC); - if (!vvfl) { + if (VLAN_ALLOWED(adapter)) { + struct virtchnl_vlan_filter_list *vvfl; + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + + len = sizeof(*vvfl) + (count * sizeof(u16)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) / + sizeof(u16); + len = sizeof(*vvfl) + (count * sizeof(u16)); + more = true; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vvfl->vlan_id[i] = f->vlan.vid; + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); - return; - } - vvfl->vsi_id = adapter->vsi_res->vsi_id; - vvfl->num_elements = count; - list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { - if (f->remove) { - vvfl->vlan_id[i] = f->vlan; - i++; - list_del(&f->list); - kfree(f); - if (i == count) - break; + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + } else { + struct virtchnl_vlan_filter_list_v2 *vvfl_v2; + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2; + + len = sizeof(*vvfl_v2) + + ((count - 1) * sizeof(struct virtchnl_vlan_filter)); + if (len > IAVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n"); + count = (IAVF_MAX_AQ_BUF_SIZE - + sizeof(*vvfl_v2)) / + sizeof(struct virtchnl_vlan_filter); + len = sizeof(*vvfl_v2) + + ((count - 1) * + sizeof(struct virtchnl_vlan_filter)); + more = true; } - } - if (!more) - adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; - spin_unlock_bh(&adapter->mac_vlan_list_lock); + vvfl_v2 = kzalloc(len, GFP_ATOMIC); + if (!vvfl_v2) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + vvfl_v2->vport_id = adapter->vsi_res->vsi_id; + vvfl_v2->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + struct virtchnl_vlan_supported_caps *filtering_support = + &adapter->vlan_v2_caps.filtering.filtering_support; + struct virtchnl_vlan *vlan; + + /* give priority over outer if it's enabled */ + if (filtering_support->outer) + vlan = &vvfl_v2->filters[i].outer; + else + vlan = &vvfl_v2->filters[i].inner; + + vlan->tci = f->vlan.vid; + vlan->tpid = f->vlan.tpid; + + list_del(&f->list); + kfree(f); + i++; + if (i == count) + break; + } + } + + if (!more) + adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); - iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); - kfree(vvfl); + iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2, + (u8 *)vvfl_v2, 
len); + kfree(vvfl_v2); + } } /** @@ -752,15 +930,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags) if (flags & FLAG_VF_MULTICAST_PROMISC) { adapter->flags |= IAVF_FLAG_ALLMULTI_ON; adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI; - dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n", + adapter->netdev->name); } if (!flags) { - adapter->flags &= ~(IAVF_FLAG_PROMISC_ON | - IAVF_FLAG_ALLMULTI_ON); - adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC | - IAVF_FLAG_AQ_RELEASE_ALLMULTI); - dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + if (adapter->flags & IAVF_FLAG_PROMISC_ON) { + adapter->flags &= ~IAVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + + if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) { + adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI; + dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n", + adapter->netdev->name); + } } adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; @@ -784,6 +970,8 @@ void iavf_request_stats(struct iavf_adapter *adapter) /* no error message, this isn't crucial */ return; } + + adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS; adapter->current_op = VIRTCHNL_OP_GET_STATS; vqs.vsi_id = adapter->vsi_res->vsi_id; /* queue maps are ignored for this message - only the vsi is used */ @@ -936,6 +1124,204 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter) iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0); } +/** + * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype + * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) + */ +static u32 iavf_tpid_to_vc_ethertype(u16 tpid) +{ + switch (tpid) { + case ETH_P_8021Q: + return VIRTCHNL_VLAN_ETHERTYPE_8100; + case ETH_P_8021AD: + return VIRTCHNL_VLAN_ETHERTYPE_88A8; + } + + return 0; +} + +/** + * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message + * @adapter: adapter structure + * @msg: message structure used for updating offloads over virtchnl to update + * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.) 
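For illustration, a condensed sketch of the per-filter step shared by the iavf_add_vlans()/iavf_del_vlans() v2 branches above (identifiers come from those hunks; this fragment is not standalone code):

	struct virtchnl_vlan_supported_caps *support =
		&adapter->vlan_v2_caps.filtering.filtering_support;
	struct virtchnl_vlan *vlan;

	/* prefer the outer filter slot when the PF reports outer VLAN
	 * filtering support, otherwise fall back to the inner slot
	 */
	vlan = support->outer ? &vvfl_v2->filters[i].outer
			      : &vvfl_v2->filters[i].inner;
	vlan->tci = f->vlan.vid;	/* VLAN ID is carried in the TCI field */
	vlan->tpid = f->vlan.tpid;	/* 0x8100 or 0x88a8 */
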
+ * @offload_op: opcode used to determine which support structure to check + */ +static int +iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter, + struct virtchnl_vlan_setting *msg, u16 tpid, + enum virtchnl_ops offload_op) +{ + struct virtchnl_vlan_supported_caps *offload_support; + u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid); + + /* reference the correct offload support structure */ + switch (offload_op) { + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + offload_support = + &adapter->vlan_v2_caps.offloads.stripping_support; + break; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + offload_support = + &adapter->vlan_v2_caps.offloads.insertion_support; + break; + default: + dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n", + offload_op); + return -EINVAL; + } + + /* make sure ethertype is supported */ + if (offload_support->outer & vc_ethertype && + offload_support->outer & VIRTCHNL_VLAN_TOGGLE) { + msg->outer_ethertype_setting = vc_ethertype; + } else if (offload_support->inner & vc_ethertype && + offload_support->inner & VIRTCHNL_VLAN_TOGGLE) { + msg->inner_ethertype_setting = vc_ethertype; + } else { + dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n", + offload_op, tpid); + return -EINVAL; + } + + return 0; +} + +/** + * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request + * @adapter: adapter structure + * @tpid: VLAN TPID + * @offload_op: opcode used to determine which AQ required bit to clear + */ +static void +iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid, + enum virtchnl_ops offload_op) +{ + switch (offload_op) { + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING; + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING; + break; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION; + break; + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + if (tpid == ETH_P_8021Q) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION; + else if (tpid == ETH_P_8021AD) + adapter->aq_required &= + ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION; + break; + default: + dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n", + offload_op); + } +} + +/** + * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl + * @adapter: adapter structure + * @tpid: VLAN TPID used for the command (i.e. 
0x8100 or 0x88a8) + * @offload_op: offload_op used to make the request over virtchnl + */ +static void +iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid, + enum virtchnl_ops offload_op) +{ + struct virtchnl_vlan_setting *msg; + int len = sizeof(*msg); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n", + offload_op, adapter->current_op); + return; + } + + adapter->current_op = offload_op; + + msg = kzalloc(len, GFP_KERNEL); + if (!msg) + return; + + msg->vport_id = adapter->vsi_res->vsi_id; + + /* always clear to prevent unsupported and endless requests */ + iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op); + + /* only send valid offload requests */ + if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op)) + iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len); + else + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(msg); +} + +/** + * iavf_enable_vlan_stripping_v2 - enable VLAN stripping + * @adapter: adapter structure + * @tpid: VLAN TPID used to enable VLAN stripping + */ +void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2); +} + +/** + * iavf_disable_vlan_stripping_v2 - disable VLAN stripping + * @adapter: adapter structure + * @tpid: VLAN TPID used to disable VLAN stripping + */ +void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2); +} + +/** + * iavf_enable_vlan_insertion_v2 - enable VLAN insertion + * @adapter: adapter structure + * @tpid: VLAN TPID used to enable VLAN insertion + */ +void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2); +} + +/** + * iavf_disable_vlan_insertion_v2 - disable VLAN insertion + * @adapter: adapter structure + * @tpid: VLAN TPID used to disable VLAN insertion + */ +void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) +{ + iavf_send_vlan_offload_v2(adapter, tpid, + VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); +} + #define IAVF_MAX_SPEED_STRLEN 13 /** @@ -1005,7 +1391,7 @@ print_link_msg: } else if (link_speed_mbps == SPEED_UNKNOWN) { snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps"); } else { - snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s", + snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps, "Mbps"); } @@ -1510,7 +1896,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: - dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n"); + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); @@ -1722,8 +2108,67 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, } spin_lock_bh(&adapter->mac_vlan_list_lock); iavf_add_filter(adapter, adapter->hw.mac.addr); + + if (VLAN_ALLOWED(adapter)) { + if (!list_empty(&adapter->vlan_filter_list)) { + struct iavf_vlan_filter *vlf; + + /* re-add all VLAN filters over virtchnl */ + list_for_each_entry(vlf, + &adapter->vlan_filter_list, + list) + vlf->add = true; + + adapter->aq_required |= + 
IAVF_FLAG_AQ_ADD_VLAN_FILTER; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + iavf_parse_vf_resource_msg(adapter); + + /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the + * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish + * configuration + */ + if (VLAN_V2_ALLOWED(adapter)) + break; + /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2 + * wasn't successfully negotiated with the PF + */ + } + fallthrough; + case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: { + if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS) + memcpy(&adapter->vlan_v2_caps, msg, + min_t(u16, msglen, + sizeof(adapter->vlan_v2_caps))); + iavf_process_config(adapter); + + /* unlock crit_lock before acquiring rtnl_lock as other + * processes holding rtnl_lock could be waiting for the same + * crit_lock + */ + mutex_unlock(&adapter->crit_lock); + /* VLAN capabilities can change during VFR, so make sure to + * update the netdev features with the new capabilities + */ + rtnl_lock(); + netdev_update_features(netdev); + rtnl_unlock(); + if (iavf_lock_timeout(&adapter->crit_lock, 10000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", + __func__); + + /* Request VLAN offload settings */ + if (VLAN_V2_ALLOWED(adapter)) + iavf_set_vlan_offload_features(adapter, 0, + netdev->features); + + iavf_set_queue_vlan_tag_loc(adapter); + } break; case VIRTCHNL_OP_ENABLE_QUEUES: diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b2db39ee5f85..4e16d185077d 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -112,7 +112,6 @@ #define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */ #define ICE_CHNL_START_TC 1 -#define ICE_CHNL_MAX_TC 16 #define ICE_MAX_RESET_WAIT 20 @@ -201,6 +200,7 @@ struct ice_channel { struct ice_aqc_vsi_props info; u64 max_tx_rate; u64 min_tx_rate; + atomic_t num_sb_fltr; struct ice_vsi *ch_vsi; }; @@ -503,6 +503,7 @@ struct ice_pf { struct pci_dev *pdev; struct devlink_region *nvm_region; + struct devlink_region *sram_region; struct devlink_region *devcaps_region; /* devlink port data */ @@ -552,6 +553,7 @@ struct ice_pf { spinlock_t aq_wait_lock; struct hlist_head aq_wait_list; wait_queue_head_t aq_wait_queue; + bool fw_emp_reset_disabled; wait_queue_head_t reset_wait_queue; @@ -576,6 +578,7 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded:1; /* has previous stats been loaded */ + u8 rdma_mode; u16 dcbx_cap; u32 tx_timeout_count; unsigned long tx_timeout_last_recovery; @@ -789,6 +792,9 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf) #define ICE_FD_STAT_PF_IDX(base_idx) \ ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT) #define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx) +#define ICE_FD_STAT_CH 1 +#define ICE_FD_CH_STAT_IDX(base_idx) \ + (ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH) /** * ice_is_adq_active - any active ADQs @@ -847,9 +853,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup); int ice_plug_aux_dev(struct ice_pf *pf); void ice_unplug_aux_dev(struct ice_pf *pf); int ice_init_rdma(struct ice_pf *pf); -const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); bool ice_is_wol_supported(struct ice_hw *hw); +void ice_fdir_del_all_fltrs(struct ice_vsi *vsi); int ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, bool is_tun); @@ -860,6 +866,7 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, 
struct ethtool_rxnfc *cmd); int ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, u32 *rule_locs); +void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx); void ice_fdir_release_flows(struct ice_hw *hw); void ice_fdir_replay_flows(struct ice_hw *hw); void ice_fdir_replay_fltrs(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 4eef3488d86f..ad1dcfa5ff65 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -117,6 +117,8 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_PENDING_NET_VER 0x004D #define ICE_AQC_CAPS_RDMA 0x0051 +#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 +#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; @@ -1408,6 +1410,11 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ #define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) +#define ICE_AQC_NVM_RESET_LVL_M ICE_M(0x3, 0) /* Write reply only */ +#define ICE_AQC_NVM_POR_FLAG 0 +#define ICE_AQC_NVM_PERST_FLAG 1 +#define ICE_AQC_NVM_EMPR_FLAG 2 +#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */ __le16 module_typeid; __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 1efc635cc0f5..1a5ece3bce79 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -6,6 +6,18 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" +static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring) +{ + rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL); + return !!rx_ring->xdp_buf; +} + +static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring) +{ + rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); + return !!rx_ring->rx_buf; +} + /** * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI * @qs_cfg: gathered variables needed for PF->VSI queues assignment @@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id); + kfree(ring->rx_buf); ring->xsk_pool = ice_xsk_pool(ring); if (ring->xsk_pool) { + if (!ice_alloc_rx_buf_zc(ring)) + return -ENOMEM; xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); ring->rx_buf_len = @@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); } else { + if (!ice_alloc_rx_buf(ring)) + return -ENOMEM; if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) /* coverity[check_return] */ xdp_rxq_info_reg(&ring->xdp_rxq, @@ -759,7 +776,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_channel *ch = ring->ch; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u16 pf_q; u8 tc; @@ -804,9 +821,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ring->q_handle, 1, qg_buf, buf_len, NULL); if (status) { - dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n", + status); + return status; } /* Add Tx Queue TEID into the VSI Tx ring 
from the @@ -929,7 +946,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, struct ice_pf *pf = vsi->back; struct ice_q_vector *q_vector; struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; u32 val; /* clear cause_ena bit for disabled queues */ @@ -953,18 +970,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, rel_vmvf_num, NULL); /* if the disable queue command was exercised during an - * active reset flow, ICE_ERR_RESET_ONGOING is returned. + * active reset flow, -EBUSY is returned. * This is not an error as the reset operation disables * queues at the hardware level anyway. */ - if (status == ICE_ERR_RESET_ONGOING) { + if (status == -EBUSY) { dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n"); - } else if (status == ICE_ERR_DOES_NOT_EXIST) { + } else if (status == -ENOENT) { dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); } else if (status) { - dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n", - ice_stat_str(status)); - return -ENODEV; + dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n", + status); + return status; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h new file mode 100644 index 000000000000..57abd52386d0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_CGU_REGS_H_ +#define _ICE_CGU_REGS_H_ + +#define NAC_CGU_DWORD9 0x24 +union nac_cgu_dword9 { + struct { + u32 time_ref_freq_sel : 3; + u32 clk_eref1_en : 1; + u32 clk_eref0_en : 1; + u32 time_ref_en : 1; + u32 time_sync_en : 1; + u32 one_pps_out_en : 1; + u32 clk_ref_synce_en : 1; + u32 clk_synce1_en : 1; + u32 clk_synce0_en : 1; + u32 net_clk_ref1_en : 1; + u32 net_clk_ref0_en : 1; + u32 clk_synce1_amp : 2; + u32 misc6 : 1; + u32 clk_synce0_amp : 2; + u32 one_pps_out_amp : 2; + u32 misc24 : 12; + } field; + u32 val; +}; + +#define NAC_CGU_DWORD19 0x4c +union nac_cgu_dword19 { + struct { + u32 tspll_fbdiv_intgr : 8; + u32 fdpll_ulck_thr : 5; + u32 misc15 : 3; + u32 tspll_ndivratio : 4; + u32 tspll_iref_ndivratio : 3; + u32 misc19 : 1; + u32 japll_ndivratio : 4; + u32 japll_iref_ndivratio : 3; + u32 misc27 : 1; + } field; + u32 val; +}; + +#define NAC_CGU_DWORD22 0x58 +union nac_cgu_dword22 { + struct { + u32 fdpll_frac_div_out_nc : 2; + u32 fdpll_lock_int_for : 1; + u32 synce_hdov_int_for : 1; + u32 synce_lock_int_for : 1; + u32 fdpll_phlead_slip_nc : 1; + u32 fdpll_acc1_ovfl_nc : 1; + u32 fdpll_acc2_ovfl_nc : 1; + u32 synce_status_nc : 6; + u32 fdpll_acc1f_ovfl : 1; + u32 misc18 : 1; + u32 fdpllclk_div : 4; + u32 time1588clk_div : 4; + u32 synceclk_div : 4; + u32 synceclk_sel_div2 : 1; + u32 fdpllclk_sel_div2 : 1; + u32 time1588clk_sel_div2 : 1; + u32 misc3 : 1; + } field; + u32 val; +}; + +#define NAC_CGU_DWORD24 0x60 +union nac_cgu_dword24 { + struct { + u32 tspll_fbdiv_frac : 22; + u32 misc20 : 2; + u32 ts_pll_enable : 1; + u32 time_sync_tspll_align_sel : 1; + u32 ext_synce_sel : 1; + u32 ref1588_ck_div : 4; + u32 time_ref_sel : 1; + } field; + u32 val; +}; + +#define TSPLL_CNTR_BIST_SETTINGS 0x344 +union tspll_cntr_bist_settings { + struct { + u32 i_irefgen_settling_time_cntr_7_0 : 8; + u32 i_irefgen_settling_time_ro_standby_1_0 : 2; + u32 reserved195 : 5; + u32 i_plllock_sel_0 : 1; + u32 i_plllock_sel_1 : 
1; + u32 i_plllock_cnt_6_0 : 7; + u32 i_plllock_cnt_10_7 : 4; + u32 reserved200 : 4; + } field; + u32 val; +}; + +#define TSPLL_RO_BWM_LF 0x370 +union tspll_ro_bwm_lf { + struct { + u32 bw_freqov_high_cri_7_0 : 8; + u32 bw_freqov_high_cri_9_8 : 2; + u32 biascaldone_cri : 1; + u32 plllock_gain_tran_cri : 1; + u32 plllock_true_lock_cri : 1; + u32 pllunlock_flag_cri : 1; + u32 afcerr_cri : 1; + u32 afcdone_cri : 1; + u32 feedfwrdgain_cal_cri_7_0 : 8; + u32 m2fbdivmod_cri_7_0 : 8; + } field; + u32 val; +}; + +#endif /* _ICE_CGU_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index b3066d0fea8b..408d15a5b0e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2,7 +2,6 @@ /* Copyright (c) 2018, Intel Corporation. */ #include "ice_common.h" -#include "ice_lib.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" @@ -16,10 +15,10 @@ * This function sets the MAC type of the adapter based on the * vendor ID and device ID stored in the HW structure. */ -static enum ice_status ice_set_mac_type(struct ice_hw *hw) +static int ice_set_mac_type(struct ice_hw *hw) { if (hw->vendor_id != PCI_VENDOR_ID_INTEL) - return ICE_ERR_DEVICE_NOT_SUPPORTED; + return -ENODEV; switch (hw->device_id) { case ICE_DEV_ID_E810C_BACKPLANE: @@ -99,7 +98,7 @@ bool ice_is_e810t(struct ice_hw *hw) * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port * configuration, flow director filters, etc.). */ -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) +int ice_clear_pf_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -123,21 +122,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) * ice_discover_dev_caps is expected to be called before this function is * called. 
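The register-image unions in the new ice_cgu_regs.h above are meant for read-modify-write access by field name. A minimal sketch of that pattern, assuming for simplicity a directly mapped register and the driver's rd32()/wr32() accessors (the hardware may in fact require a sideband read/write path):

	union nac_cgu_dword24 dw24;

	dw24.val = rd32(hw, NAC_CGU_DWORD24);	/* fetch the raw 32-bit image */
	dw24.field.ts_pll_enable = 1;		/* flip one named field */
	wr32(hw, NAC_CGU_DWORD24, dw24.val);	/* write the image back */
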
*/ -static enum ice_status +static int ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_read_resp *resp; struct ice_aqc_manage_mac_read *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; u16 flags; u8 i; cmd = &desc.params.mac_read; if (buf_size < sizeof(*resp)) - return ICE_ERR_BUF_TOO_SHORT; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); @@ -150,7 +149,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); - return ICE_ERR_CFG; + return -EIO; } /* A single port can report up to two (LAN and WoL) addresses */ @@ -176,7 +175,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, * * Returns the various PHY capabilities supported on the Port (0x0600) */ -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) @@ -184,18 +183,18 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps *cmd; u16 pcaps_size = sizeof(*pcaps); struct ice_aq_desc desc; - enum ice_status status; struct ice_hw *hw; + int status; cmd = &desc.params.get_phy; if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; if (report_mode == ICE_AQC_REPORT_DFLT_CFG && !ice_fw_supports_report_dflt_cfg(hw)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); @@ -252,7 +251,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, * returns error (ENOENT), then no cage present. If no cage present, then * connection type is backplane or BASE-T. */ -static enum ice_status +static int ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, struct ice_sq_cd *cd) { @@ -418,7 +417,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) * * Get Link Status (0x607). Returns the link status of the adapter. 
*/ -enum ice_status +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { @@ -429,12 +428,12 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; - enum ice_status status; struct ice_hw *hw; u16 cmd_flags; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; @@ -556,7 +555,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, * * Set MAC configuration (0x0603) */ -enum ice_status +int ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_cfg *cmd; @@ -565,7 +564,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) cmd = &desc.params.set_mac_cfg; if (max_frame_size == 0) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); @@ -580,17 +579,17 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ -static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) +static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; - enum ice_status status; + int status; hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->switch_info), GFP_KERNEL); sw = hw->switch_info; if (!sw) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; @@ -666,17 +665,17 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) * ice_get_fw_log_cfg - get FW logging configuration * @hw: pointer to the HW struct */ -static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) +static int ice_get_fw_log_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; - enum ice_status status; __le16 *config; + int status; u16 size; size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); if (!config) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); @@ -738,15 +737,15 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) * messages from FW to SW. Interrupts are typically disabled during the device's * initialization phase. 
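Most of the ice_common.c hunks here mechanically replace enum ice_status returns with standard negative errnos. The correspondence, as applied in these hunks:

	/* ICE_ERR_PARAM, ICE_ERR_BAD_PTR, ICE_ERR_INVAL_SIZE,
	 * ICE_ERR_BUF_TOO_SHORT                -> -EINVAL
	 * ICE_ERR_NO_MEMORY                    -> -ENOMEM
	 * ICE_ERR_CFG, ICE_ERR_AQ_ERROR,
	 * ICE_ERR_RESET_FAILED                 -> -EIO
	 * ICE_ERR_DEVICE_NOT_SUPPORTED         -> -ENODEV
	 * ICE_ERR_RESET_ONGOING                -> -EBUSY
	 * ICE_ERR_DOES_NOT_EXIST               -> -ENOENT
	 * ICE_ERR_AQ_NO_WORK                   -> -EALREADY
	 * ICE_ERR_MAX_LIMIT                    -> -ENOSPC
	 */
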
*/ -static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) +static int ice_cfg_fw_log(struct ice_hw *hw, bool enable) { struct ice_aqc_fw_logging *cmd; - enum ice_status status = 0; u16 i, chgs = 0, len = 0; struct ice_aq_desc desc; __le16 *data = NULL; u8 actv_evnts = 0; void *buf = NULL; + int status = 0; if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) return 0; @@ -790,7 +789,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) sizeof(*data), GFP_KERNEL); if (!data) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } val = i << ICE_AQC_FW_LOG_ID_S; @@ -904,12 +903,12 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ -enum ice_status ice_init_hw(struct ice_hw *hw) +int ice_init_hw(struct ice_hw *hw) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; u16 mac_buf_len; void *mac_buf; + int status; /* Set MAC type based on DeviceID */ status = ice_set_mac_type(hw); @@ -956,7 +955,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*hw->port_info), GFP_KERNEL); if (!hw->port_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_cqinit; } @@ -985,7 +984,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_sched; } @@ -1006,7 +1005,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* need a valid SW entry point to build a Tx tree */ if (!hw->sw_entry_point_layer) { ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); - status = ICE_ERR_CFG; + status = -EIO; goto err_unroll_sched; } INIT_LIST_HEAD(&hw->agg_list); @@ -1026,7 +1025,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll_fltr_mgmt_struct; } @@ -1096,7 +1095,7 @@ void ice_deinit_hw(struct ice_hw *hw) * ice_check_reset - Check to see if a global reset is complete * @hw: pointer to the hardware structure */ -enum ice_status ice_check_reset(struct ice_hw *hw) +int ice_check_reset(struct ice_hw *hw) { u32 cnt, reg = 0, grst_timeout, uld_mask; @@ -1116,7 +1115,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ @@ -1143,7 +1142,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. 
GLNVM_ULD = 0x%x\n", reg); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1156,7 +1155,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) * If a global reset has been triggered, this function checks * for its completion and then issues the PF reset */ -static enum ice_status ice_pf_reset(struct ice_hw *hw) +static int ice_pf_reset(struct ice_hw *hw) { u32 cnt, reg; @@ -1169,7 +1168,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { /* poll on global reset currently in progress until done */ if (ice_check_reset(hw)) - return ICE_ERR_RESET_FAILED; + return -EIO; return 0; } @@ -1194,7 +1193,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); - return ICE_ERR_RESET_FAILED; + return -EIO; } return 0; @@ -1212,7 +1211,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) * This has to be cleared using ice_clear_pxe_mode again, once the AQ * interface has been restored in the rebuild flow. */ -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) +int ice_reset(struct ice_hw *hw, enum ice_reset_req req) { u32 val = 0; @@ -1228,7 +1227,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) val = GLGEN_RTRIG_GLOBR_M; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } val |= rd32(hw, GLGEN_RTRIG); @@ -1247,16 +1246,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) * * Copies rxq context from dense structure to HW register space */ -static enum ice_status +static int ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) { u8 i; if (!ice_rxq_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (rxq_index > QRX_CTRL_MAX_INDEX) - return ICE_ERR_PARAM; + return -EINVAL; /* Copy each dword separately to HW */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { @@ -1306,14 +1305,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * it to HW register space and enables the hardware to prefetch descriptors * instead of only fetching them on demand */ -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; if (!rlan_ctx) - return ICE_ERR_BAD_PTR; + return -EINVAL; rlan_ctx->prefena = 1; @@ -1369,9 +1368,8 @@ static int ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw), - (struct ice_aq_desc *)desc, - buf, buf_size, cd)); + return ice_sq_send_cmd(hw, ice_get_sbq(hw), + (struct ice_aq_desc *)desc, buf, buf_size, cd); } /** @@ -1453,17 +1451,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode) * Retry sending the FW Admin Queue command, multiple times, to the FW Admin * Queue if the EBUSY AQ error is returned. 
*/ -static enum ice_status +static int ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc_cpy; - enum ice_status status; bool is_cmd_for_retry; u8 *buf_cpy = NULL; u8 idx = 0; u16 opcode; + int status; opcode = le16_to_cpu(desc->opcode); is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); @@ -1473,7 +1471,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf) { buf_cpy = kzalloc(buf_size, GFP_KERNEL); if (!buf_cpy) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&desc_cpy, desc, sizeof(desc_cpy)); @@ -1510,13 +1508,13 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, * * Helper function to send FW Admin Queue commands to the FW Admin Queue. */ -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd = &desc->params.res_owner; bool lock_acquired = false; - enum ice_status status; + int status; /* When a package download is in process (i.e. when the firmware's * Global Configuration Lock resource is held), only the Download @@ -1555,11 +1553,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, * * Get the firmware version (0x0001) from the admin queue commands */ -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) { struct ice_aqc_get_ver *resp; struct ice_aq_desc desc; - enum ice_status status; + int status; resp = &desc.params.get_ver; @@ -1590,7 +1588,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) * * Send the driver version (0x0002) to the firmware */ -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd) { @@ -1601,7 +1599,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, cmd = &desc.params.driver_ver; if (!dv) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); @@ -1627,7 +1625,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well (0x0003). */ -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) { struct ice_aqc_q_shutdown *cmd; struct ice_aq_desc desc; @@ -1654,12 +1652,12 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * Requests common resource using the admin queue commands (0x0008). 
* When attempting to acquire the Global Config Lock, the driver can * learn of three states: - * 1) ICE_SUCCESS - acquired lock, and can perform download package - * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load - * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has - * successfully downloaded the package; the driver does - * not have to download the package and can continue - * loading + * 1) 0 - acquired lock, and can perform download package + * 2) -EIO - did not get lock, driver should fail to load + * 3) -EALREADY - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading * * Note that if the caller is in an acquire lock, perform action, release lock * phase of operation, it is possible that the FW may detect a timeout and issue @@ -1668,14 +1666,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * will likely get an error propagated back to it indicating the Download * Package, Update Package or the Release Resource AQ commands timed out. */ -static enum ice_status +static int ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd_resp = &desc.params.res_owner; @@ -1707,15 +1705,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_IN_PROG) { *timeout = le32_to_cpu(cmd_resp->timeout); - return ICE_ERR_AQ_ERROR; + return -EIO; } else if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_DONE) { - return ICE_ERR_AQ_NO_WORK; + return -EALREADY; } /* invalid FW response, force a timeout immediately */ *timeout = 0; - return ICE_ERR_AQ_ERROR; + return -EIO; } /* If the resource is held by some other driver, the command completes @@ -1737,7 +1735,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, * * release common resource using the admin queue commands (0x0009) */ -static enum ice_status +static int ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, struct ice_sq_cd *cd) { @@ -1763,23 +1761,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * * This function will attempt to acquire the ownership of a resource. */ -enum ice_status +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; u32 time_left = timeout; - enum ice_status status; + int status; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + /* A return code of -EALREADY means that another driver has * previously acquired the resource and performed any necessary updates; * in this case the caller does not obtain the resource and has no * further work to do. */ - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) goto ice_acquire_res_exit; if (status) @@ -1792,7 +1790,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? 
timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (status == ICE_ERR_AQ_NO_WORK) + if (status == -EALREADY) /* lock free, but no work to do */ break; @@ -1800,15 +1798,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, /* lock acquired */ break; } - if (status && status != ICE_ERR_AQ_NO_WORK) + if (status && status != -EALREADY) ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); ice_acquire_res_exit: - if (status == ICE_ERR_AQ_NO_WORK) { + if (status == -EALREADY) { if (access == ICE_RES_WRITE) ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else - ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); + ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); } return status; } @@ -1822,16 +1820,15 @@ ice_acquire_res_exit: */ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) { - enum ice_status status; u32 total_delay = 0; + int status; status = ice_aq_release_res(hw, res, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin queue timeout, so handle them correctly */ - while ((status == ICE_ERR_AQ_TIMEOUT) && - (total_delay < hw->adminq.sq_cmd_timeout)) { + while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) { mdelay(1); status = ice_aq_release_res(hw, res, 0, NULL); total_delay++; @@ -1849,7 +1846,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) * * Helper function to allocate/free resources using the admin queue commands */ -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd) @@ -1860,10 +1857,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, cmd = &desc.params.sw_res_ctrl; if (!buf) - return ICE_ERR_PARAM; + return -EINVAL; if (buf_size < flex_array_size(buf, elem, num_entries)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -1882,17 +1879,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, * @btm: allocate from bottom * @res: pointer to array that will receive the resources */ -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to allocate resource. */ buf->num_elems = cpu_to_le16(num); @@ -1920,16 +1917,16 @@ ice_alloc_res_exit: * @num: number of resources * @res: pointer to array that contains the resources to free */ -enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Prepare buffer to free resource. 
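Seen from a caller, the three-state Global Config Lock contract documented above reduces to a short pattern. A hedged sketch (the resource ID comes from ice's enum ice_aq_res_ids; the timeout constant and the download step are assumptions for illustration):

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
	if (status == -EALREADY)
		return 0;	/* another PF already downloaded the package */
	if (status)
		return status;	/* -EIO: lock not obtained, fail to load */

	/* ... download the package while holding the lock ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
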
*/ buf->num_elems = cpu_to_le16(num); @@ -2071,6 +2068,18 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; + case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: + caps->pcie_reset_avoidance = (number > 0); + ice_debug(hw, ICE_DBG_INIT, + "%s: pcie_reset_avoidance = %d\n", prefix, + caps->pcie_reset_avoidance); + break; + case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: + caps->reset_restrict_support = (number == 1); + ice_debug(hw, ICE_DBG_INIT, + "%s: reset_restrict_support = %d\n", prefix, + caps->reset_restrict_support); + break; default: /* Not one of the recognized common capabilities */ found = false; @@ -2180,6 +2189,18 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); + if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { + info->time_ref = (enum ice_time_ref_freq)info->clk_freq; + } else { + /* Unknown clock frequency, so assume a (probably incorrect) + * default to avoid out-of-bounds look ups of frequency + * related information. + */ + ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", + info->clk_freq); + info->time_ref = ICE_TIME_REF_FREQ_25_000; + } + ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", func_p->common_cap.ieee_1588); ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", @@ -2486,19 +2507,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that * firmware could return) to avoid this. */ -enum ice_status +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_cap; if (opc != ice_aqc_opc_list_func_caps && opc != ice_aqc_opc_list_dev_caps) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); @@ -2517,16 +2538,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * Read the device capabilities and extract them into the dev_caps structure * for later use. */ -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2551,16 +2572,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) * Read the function capabilities and extract them into the func_caps structure * for later use. 
*/ -static enum ice_status +static int ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) { - enum ice_status status; u32 cap_count = 0; void *cbuf; + int status; cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!cbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum @@ -2650,9 +2671,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw) * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ -enum ice_status ice_get_caps(struct ice_hw *hw) +int ice_get_caps(struct ice_hw *hw) { - enum ice_status status; + int status; status = ice_discover_dev_caps(hw, &hw->dev_caps); if (status) @@ -2670,7 +2691,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw) * * This function is used to write MAC address to the NVM (0x0108). */ -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd) { @@ -2692,7 +2713,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, * * Tell the firmware that the driver is taking over from PXE (0x0110). */ -static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) +static int ice_aq_clear_pxe_mode(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -2903,15 +2924,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601). */ -enum ice_status +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (!cfg) - return ICE_ERR_PARAM; + return -EINVAL; /* Ensure that only valid bits of cfg->caps can be turned on. */ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { @@ -2952,13 +2973,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, * ice_update_link_info - update status of the HW network link * @pi: port info structure of the interested logical port */ -enum ice_status ice_update_link_info(struct ice_port_info *pi) +int ice_update_link_info(struct ice_port_info *pi) { struct ice_link_status *li; - enum ice_status status; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; li = &pi->phy.link_info; @@ -2974,7 +2995,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); @@ -3070,7 +3091,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) * @cfg: PHY configuration data to set FC mode * @req_mode: FC mode to configure */ -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) { @@ -3078,7 +3099,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, u8 pause_mask = 0x0; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; switch (req_mode) { case ICE_FC_FULL: @@ -3117,23 +3138,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, * * Set the requested flow control mode. 
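The body of the switch in ice_cfg_phy_fc() falls outside these hunks; for context, a plausible mapping consistent with the modes and the pause_mask variable visible above (bit names assumed from ice_adminq_cmd.h, so treat this as a sketch rather than the function's actual body):

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE |
			      ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}
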
*/ -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || !aq_failures) - return ICE_ERR_BAD_PTR; + return -EINVAL; *aq_failures = 0; hw = pi->hw; pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Get the current PHY config */ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, @@ -3258,22 +3279,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, * @cfg: PHY configuration data to set FEC mode * @fec: FEC mode to configure */ -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || !cfg) - return ICE_ERR_BAD_PTR; + return -EINVAL; hw = pi->hw; pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, (ice_fw_supports_report_dflt_cfg(hw) ? @@ -3313,7 +3334,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, cfg->link_fec_opt |= pcaps->link_fec_options; break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; break; } @@ -3344,13 +3365,13 @@ out: * The variable link_up is invalid if status is non zero. As a * result of this call, link status reporting becomes enabled */ -enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) +int ice_get_link_status(struct ice_port_info *pi, bool *link_up) { struct ice_phy_info *phy_info; - enum ice_status status = 0; + int status = 0; if (!pi || !link_up) - return ICE_ERR_PARAM; + return -EINVAL; phy_info = &pi->phy; @@ -3375,7 +3396,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) * * Sets up the link and restarts the Auto-Negotiation over the link. 
*/ -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd) { @@ -3405,7 +3426,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, * * Set event mask (0x0613) */ -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd) { @@ -3430,7 +3451,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, * * Enable/disable loopback on a given port */ -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_lb *cmd; @@ -3453,7 +3474,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) * * Set LED value for the given port (0x06e9) */ -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd) { @@ -3488,17 +3509,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, * * Read/Write SFF EEPROM (0x06EE) */ -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd) { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || (mem_addr & 0xff00)) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); cmd = &desc.params.read_write_sff_param; @@ -3527,23 +3548,23 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, * * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ -static enum ice_status +static int __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) { u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; struct ice_aqc_get_set_rss_lut *cmd_resp; struct ice_aq_desc desc; - enum ice_status status; + int status; u8 *lut; if (!params) - return ICE_ERR_PARAM; + return -EINVAL; vsi_handle = params->vsi_handle; lut = params->lut; if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) - return ICE_ERR_PARAM; + return -EINVAL; lut_size = params->lut_size; lut_type = params->lut_type; @@ -3572,7 +3593,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); break; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -3607,7 +3628,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params } fallthrough; default: - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_get_set_rss_lut_exit; } @@ -3626,7 +3647,7 @@ ice_aq_get_set_rss_lut_exit: * * get the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) { return __ice_aq_get_set_rss_lut(hw, get_params, false); @@ -3639,7 +3660,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_ * * set the RSS lookup table, PF or VSI type */ -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) { return __ice_aq_get_set_rss_lut(hw, set_params, true); @@ -3654,10 +3675,9 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_ * * get (0x0B04) or set (0x0B02) the RSS key per VSI */ -static enum -ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, - struct ice_aqc_get_set_rss_keys *key, - bool set) +static int 
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, + struct ice_aqc_get_set_rss_keys *key, bool set) { struct ice_aqc_get_set_rss_key *cmd_resp; u16 key_size = sizeof(*key); @@ -3688,12 +3708,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, * * get the RSS key per VSI */ -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { if (!ice_is_vsi_valid(hw, vsi_handle) || !key) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), key, false); @@ -3707,12 +3727,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, * * set the RSS key per VSI */ -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) - return ICE_ERR_PARAM; + return -EINVAL; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), keys, true); @@ -3739,7 +3759,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue * flow. */ -static enum ice_status +static int ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd) @@ -3754,10 +3774,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); if (!qg_list) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; for (i = 0, list = qg_list; i < num_qgrps; i++) { sum_size += struct_size(list, txqs, list->num_txqs); @@ -3766,7 +3786,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sum_size) - return ICE_ERR_PARAM; + return -EINVAL; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -3787,7 +3807,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * * Disable LAN Tx queue (0x0C31) */ -static enum ice_status +static int ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, enum ice_disq_rst_src rst_src, u16 vmvf_num, @@ -3796,18 +3816,18 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 i, sz = 0; + int status; cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); /* qg_list can be NULL only in VM/VF reset flow */ if (!qg_list && !rst_src) - return ICE_ERR_PARAM; + return -EINVAL; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) - return ICE_ERR_PARAM; + return -EINVAL; cmd->num_entries = num_qgrps; @@ -3856,7 +3876,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, } if (buf_size != sz) - return ICE_ERR_PARAM; + return -EINVAL; do_aq: status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); @@ -3914,8 +3934,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, cmd->num_qset_grps = num_qset_grps; - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list, - buf_size, cd)); + return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); } /* End of FW Admin Queue command wrappers */ @@ -4111,7 +4130,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct 
ice_ctx_ele *ce_info) { @@ -4141,7 +4160,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } } @@ -4185,7 +4204,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) * * This function adds one LAN queue */ -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -4193,19 +4212,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_sched_node *parent; struct ice_q_ctx *q_ctx; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (num_qgrps > 1 || buf->num_txqs > 1) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -4213,7 +4232,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", q_handle); - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -4221,7 +4240,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ena_txq_exit; } @@ -4290,20 +4309,20 @@ ena_txq_exit: * * This function removes queues and their corresponding nodes in SW DB */ -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handles, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { - enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item *qg_list; struct ice_q_ctx *q_ctx; + int status = -ENOENT; struct ice_hw *hw; u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; hw = pi->hw; @@ -4315,13 +4334,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, if (rst_src) return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, vmvf_num, NULL); - return ICE_ERR_CFG; + return -EIO; } buf_size = struct_size(qg_list, q_id, 1); qg_list = kzalloc(buf_size, GFP_KERNEL); if (!qg_list) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&pi->sched_lock); @@ -4368,18 +4387,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, * * This function adds/updates the VSI queues per TC. */ -static enum ice_status +static int ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *maxqs, u8 owner) { - enum ice_status status = 0; + int status = 0; u8 i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) - return ICE_ERR_CFG; + return -EIO; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); @@ -4407,7 +4426,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, * * This function adds/updates the VSI LAN queues per TC. 
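 *
 * Editor's note: after this conversion the function returns a standard
 * errno rather than enum ice_status, so callers may propagate the value
 * unchanged. A minimal caller sketch (the VSI field names are illustrative
 * assumptions, not part of this hunk):
 *
 *	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *	int err;
 *
 *	max_txqs[0] = vsi->alloc_txq;	/* per-TC queue budget */
 *	err = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
 *			      vsi->tc_cfg.ena_tc, max_txqs);
 *	if (err)
 *		return err;	/* already an errno, no translation needed */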
*/ -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs) { @@ -4428,9 +4447,8 @@ int ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_rdmaqs) { - return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, - max_rdmaqs, - ICE_SCHED_NODE_OWNER_RDMA)); + return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, + ICE_SCHED_NODE_OWNER_RDMA); } /** @@ -4451,7 +4469,6 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_aqc_add_rdma_qset_data *buf; struct ice_sched_node *parent; - enum ice_status status; struct ice_hw *hw; u16 i, buf_size; int ret; @@ -4502,12 +4519,10 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; for (i = 0; i < num_qsets; i++) { node.node_teid = buf->rdma_qsets[i].qset_teid; - status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, - &node); - if (status) { - ret = ice_status_to_errno(status); + ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, + &node); + if (ret) break; - } qset_teid[i] = le32_to_cpu(node.node_teid); } rdma_error_exit: @@ -4528,8 +4543,8 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id) { struct ice_aqc_dis_txq_item *qg_list; - enum ice_status status = 0; struct ice_hw *hw; + int status = 0; u16 qg_size; int i; @@ -4568,7 +4583,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, mutex_unlock(&pi->sched_lock); kfree(qg_list); - return ice_status_to_errno(status); + return status; } /** @@ -4577,7 +4592,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, * * Initializes required config data for VSI, FD, ACL, and RSS before replay. */ -static enum ice_status ice_replay_pre_init(struct ice_hw *hw) +static int ice_replay_pre_init(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; u8 i; @@ -4588,7 +4603,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw) * will allow adding rules entries back to filt_rules list, * which is operational list. */ - for (i = 0; i < ICE_SW_LKUP_LAST; i++) + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) list_replace_init(&sw->recp_list[i].filt_rules, &sw->recp_list[i].filt_replay_rules); ice_sched_replay_agg_vsi_preinit(hw); @@ -4604,12 +4619,12 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw) * Restore all VSI configuration after reset. It is required to call this * function with main VSI first. 
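 *
 * Editor's note: a minimal replay sequence consistent with the ordering
 * requirement above (illustrative; num_vsi and the error handling are
 * placeholder assumptions):
 *
 *	err = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	if (err)
 *		return err;
 *	for (i = 0; i < num_vsi; i++)
 *		if (i != ICE_MAIN_VSI_HANDLE && ice_is_vsi_valid(hw, i))
 *			ice_replay_vsi(hw, i);	/* remaining VSIs, any order */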
*/ -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Replay pre-initialization if there is any */ if (vsi_handle == ICE_MAIN_VSI_HANDLE) { @@ -4725,12 +4740,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, * * This function queries HW element information */ -enum ice_status +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf) { u16 buf_size, num_elem_ret = 0; - enum ice_status status; + int status; buf_size = sizeof(*buf); memset(buf, 0, buf_size); @@ -4775,7 +4790,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, cmd->param_indx = idx; cmd->param_val = cpu_to_le32(value); - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** @@ -4796,7 +4811,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, { struct ice_aqc_driver_shared_params *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (idx >= ICE_AQC_DRIVER_PARAM_MAX) return -EIO; @@ -4810,7 +4825,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; *value = le32_to_cpu(cmd->param_val); @@ -4840,7 +4855,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, cmd->gpio_num = pin_idx; cmd->gpio_val = value ? 1 : 0; - return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** @@ -4860,7 +4875,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, { struct ice_aqc_gpio *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); cmd = &desc.params.read_write_gpio; @@ -4869,7 +4884,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; *value = !!cmd->gpio_val; return 0; @@ -4903,13 +4918,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw) * * Gets the link default override for a port */ -enum ice_status +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi) { u16 i, tlv, tlv_len, tlv_start, buf, offset; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); @@ -4994,7 +5009,7 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) * * Set the LLDP MIB. 
(0x0A08) */ -enum ice_status +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd) { @@ -5004,7 +5019,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, cmd = &desc.params.lldp_set_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); @@ -5044,7 +5059,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) * @vsi_num: absolute HW index for VSI * @add: boolean for if adding or removing a filter */ -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 65c1b3244264..1c57097ddf0b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -14,108 +14,108 @@ #define ICE_SQ_SEND_DELAY_TIME_MS 10 #define ICE_SQ_SEND_MAX_EXECUTE 3 -enum ice_status ice_init_hw(struct ice_hw *hw); +int ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); -enum ice_status ice_check_reset(struct ice_hw *hw); -enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); +int ice_check_reset(struct ice_hw *hw); +int ice_reset(struct ice_hw *hw, enum ice_reset_req req); +int ice_create_all_ctrlq(struct ice_hw *hw); +int ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); void ice_destroy_all_ctrlq(struct ice_hw *hw); -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); -enum ice_status +int ice_get_link_status(struct ice_port_info *pi, bool *link_up); -enum ice_status ice_update_link_info(struct ice_port_info *pi); -enum ice_status +int ice_update_link_info(struct ice_port_info *pi); +int ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); -enum ice_status +int ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); -enum ice_status +int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); -enum ice_status +int ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd); bool ice_is_sbq_supported(struct ice_hw *hw); struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw); -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); void ice_clear_pxe_mode(struct ice_hw *hw); -enum ice_status ice_get_caps(struct ice_hw *hw); +int ice_get_caps(struct ice_hw *hw); void ice_set_safe_mode_caps(struct ice_hw *hw); -enum ice_status +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); -enum ice_status +int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); -enum ice_status +int ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params); -enum ice_status +int ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); -enum ice_status +int ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct 
ice_aqc_get_set_rss_keys *keys); bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); -enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); +int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -enum ice_status +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); extern struct mutex ice_global_cfg_lock_sw; -enum ice_status +int ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); +int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); -enum ice_status +int ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); -enum ice_status +int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); bool ice_is_e810(struct ice_hw *hw); -enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); -enum ice_status +int ice_clear_pf_cfg(struct ice_hw *hw); +int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); bool ice_fw_supports_link_override(struct ice_hw *hw); -enum ice_status +int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); -enum ice_status +int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); -enum ice_status +int ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode fc); bool @@ -125,27 +125,27 @@ void ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); -enum ice_status +int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); -enum ice_status +int ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); @@ -159,19 +159,19 @@ 
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, int ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id); -enum ice_status +int ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd); -enum ice_status +int ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs); -enum ice_status +int ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); -enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); +int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); struct ice_q_ctx * @@ -184,7 +184,7 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); bool ice_is_e810t(struct ice_hw *hw); -enum ice_status +int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); int @@ -199,11 +199,11 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); -enum ice_status +int ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 03bdb125be36..6bcfee295991 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -87,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); @@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->sq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->sq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.desc_buf.size = size; cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, @@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.desc_buf.va = NULL; cq->sq.desc_buf.pa = 0; cq->sq.desc_buf.size = 0; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } return 0; @@ -118,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); @@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) &cq->rq.desc_buf.pa, GFP_KERNEL | __GFP_ZERO); if (!cq->rq.desc_buf.va) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.desc_buf.size = size; return 0; } @@ -154,7 +154,7 @@ static void 
ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, sizeof(cq->rq.desc_buf), GFP_KERNEL); if (!cq->rq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; /* allocate the mapped buffers */ @@ -218,7 +218,7 @@ unwind_alloc_rq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); cq->rq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -226,7 +226,7 @@ unwind_alloc_rq_bufs: * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ -static enum ice_status +static int ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; @@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, sizeof(cq->sq.desc_buf), GFP_KERNEL); if (!cq->sq.dma_head) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; /* allocate the mapped buffers */ @@ -266,10 +266,10 @@ unwind_alloc_sq_bufs: devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); cq->sq.dma_head = NULL; - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } -static enum ice_status +static int ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) { /* Clear Head and Tail */ @@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) /* Check one register to verify that config was applied */ if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa)) - return ICE_ERR_AQ_ERROR; + return -EIO; return 0; } @@ -295,8 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) * * Configure base address and length registers for the transmit queue */ -static enum ice_status -ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); } @@ -308,10 +307,9 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * * Configure base address and length registers for the receive (event queue) */ -static enum ice_status -ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status status; + int status; status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); if (status) @@ -361,19 +359,19 @@ do { \ * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ -static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->sq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_sq_entries || !cq->sq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -421,19 +419,19 @@ init_ctrlq_exit: * Do *NOT* hold the lock when calling this as the memory allocation routines * 
called are not going to be atomic context safe */ -static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code; + int ret_code; if (cq->rq.count > 0) { /* queue already initialized */ - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->rq_buf_size) { - ret_code = ICE_ERR_CFG; + ret_code = -EIO; goto init_ctrlq_exit; } @@ -474,15 +472,14 @@ init_ctrlq_exit: * * The main shutdown routine for the Control Transmit Queue */ -static enum ice_status -ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->sq_lock); if (!cq->sq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_sq_out; } @@ -541,15 +538,14 @@ static bool ice_aq_ver_check(struct ice_hw *hw) * * The main shutdown routine for the Control Receive Queue */ -static enum ice_status -ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - enum ice_status ret_code = 0; + int ret_code = 0; mutex_lock(&cq->rq_lock); if (!cq->rq.count) { - ret_code = ICE_ERR_NOT_READY; + ret_code = -EBUSY; goto shutdown_rq_out; } @@ -576,17 +572,17 @@ shutdown_rq_out: * ice_init_check_adminq - Check version for Admin Queue to know if its alive * @hw: pointer to the hardware structure */ -static enum ice_status ice_init_check_adminq(struct ice_hw *hw) +static int ice_init_check_adminq(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; - enum ice_status status; + int status; status = ice_aq_get_fw_ver(hw, NULL); if (status) goto init_ctrlq_free_rq; if (!ice_aq_ver_check(hw)) { - status = ICE_ERR_FW_API_VER; + status = -EIO; goto init_ctrlq_free_rq; } @@ -612,10 +608,10 @@ init_ctrlq_free_rq: * * NOTE: this function does not initialize the controlq locks */ -static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { struct ice_ctl_q_info *cq; - enum ice_status ret_code; + int ret_code; switch (q_type) { case ICE_CTL_Q_ADMIN: @@ -631,14 +627,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) cq = &hw->mailboxq; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } cq->qtype = q_type; /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->num_sq_entries || !cq->rq_buf_size || !cq->sq_buf_size) { - return ICE_ERR_CFG; + return -EIO; } /* setup SQ command write back timeout */ @@ -751,10 +747,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) * * NOTE: this function does not initialize the controlq locks. 
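 *
 * Editor's note: the lock split is what makes runtime re-initialization
 * safe. The locks are created once by ice_create_all_ctrlq() at probe time
 * and survive a reset, so the reset path tears down and re-creates only the
 * queues. A sketch of the intended pattern (call sites are illustrative):
 *
 *	ice_create_all_ctrlq(hw);	/* probe: locks + queues */
 *	...
 *	ice_shutdown_all_ctrlq(hw);	/* reset: queues only */
 *	ice_init_all_ctrlq(hw);		/* locks left untouched */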
*/ -enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) +int ice_init_all_ctrlq(struct ice_hw *hw) { - enum ice_status status; u32 retry = 0; + int status; /* Init FW admin queue */ do { @@ -763,7 +759,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) return status; status = ice_init_check_adminq(hw); - if (status != ICE_ERR_AQ_FW_CRITICAL) + if (status != -EIO) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); @@ -814,7 +810,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) * driver needs to re-initialize control queues at run time it should call * ice_init_all_ctrlq instead. */ -enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) +int ice_create_all_ctrlq(struct ice_hw *hw) { ice_init_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) @@ -962,7 +958,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. */ -enum ice_status +int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) @@ -970,27 +966,27 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; - enum ice_status status = 0; struct ice_sq_cd *details; u32 total_delay = 0; + int status = 0; u16 retval = 0; u32 val = 0; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) - return ICE_ERR_RESET_ONGOING; + return -EBUSY; mutex_lock(&cq->sq_lock); cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } if ((buf && !buf_size) || (!buf && buf_size)) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto sq_send_command_error; } @@ -998,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf_size > cq->sq_buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto sq_send_command_error; } @@ -1011,7 +1007,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (val >= cq->num_sq_entries) { ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); - status = ICE_ERR_AQ_EMPTY; + status = -EIO; goto sq_send_command_error; } @@ -1028,7 +1024,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, */ if (ice_clean_sq(hw, cq) == 0) { ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); - status = ICE_ERR_AQ_FULL; + status = -ENOSPC; goto sq_send_command_error; } @@ -1082,7 +1078,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (copy_size > buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); - status = ICE_ERR_AQ_ERROR; + status = -EIO; } else { memcpy(buf, dma_buf->va, copy_size); } @@ -1098,7 +1094,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } cmd_completed = true; if (!status && retval != ICE_AQ_RC_OK) - status = ICE_ERR_AQ_ERROR; + status = -EIO; cq->sq_last_status = (enum ice_aq_err)retval; } @@ -1116,10 +1112,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { ice_debug(hw, ICE_DBG_AQ_MSG, 
"Critical FW error.\n"); - status = ICE_ERR_AQ_FW_CRITICAL; + status = -EIO; } else { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); - status = ICE_ERR_AQ_TIMEOUT; + status = -EIO; } } @@ -1154,15 +1150,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) * the contents through e. It can also return how many events are * left to process through 'pending'. */ -enum ice_status +int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; enum ice_aq_err rq_last_status; - enum ice_status ret_code = 0; struct ice_aq_desc *desc; struct ice_dma_mem *bi; + int ret_code = 0; u16 desc_idx; u16 datalen; u16 flags; @@ -1176,7 +1172,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (!cq->rq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); - ret_code = ICE_ERR_AQ_EMPTY; + ret_code = -EIO; goto clean_rq_elem_err; } @@ -1185,7 +1181,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - ret_code = ICE_ERR_AQ_NO_WORK; + ret_code = -EALREADY; goto clean_rq_elem_out; } @@ -1196,7 +1192,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval); flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { - ret_code = ICE_ERR_AQ_ERROR; + ret_code = -EIO; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", le16_to_cpu(desc->opcode), rq_last_status); } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 241427cd9bc0..0b146a0d4205 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -2,7 +2,6 @@ /* Copyright (c) 2019, Intel Corporation. */ #include "ice_common.h" -#include "ice_lib.h" #include "ice_sched.h" #include "ice_dcb.h" @@ -19,19 +18,19 @@ * * Requests the complete LLDP MIB (entire packet). (0x0A00) */ -static enum ice_status +static int ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, u16 buf_size, u16 *local_len, u16 *remote_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_get_mib *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.lldp_get_mib; if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); @@ -61,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ -static enum ice_status +static int ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { @@ -89,7 +88,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, * * Stop or Shutdown the embedded LLDP Agent (0x0A05) */ -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd) { @@ -117,8 +116,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, * * Start the embedded LLDP Agent on all ports. 
(0x0A06) */ -enum ice_status -ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_start *cmd; struct ice_aq_desc desc; @@ -598,18 +596,17 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) * * Parse DCB configuration from the LLDPDU */ -static enum ice_status -ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) +static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) { struct ice_lldp_org_tlv *tlv; - enum ice_status ret = 0; u16 offset = 0; + int ret = 0; u16 typelen; u16 type; u16 len; if (!lldpmib || !dcbcfg) - return ICE_ERR_PARAM; + return -EINVAL; /* set to the start of LLDPDU */ lldpmib += ETH_HLEN; @@ -649,17 +646,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) * * Query DCB configuration from the firmware */ -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg) { - enum ice_status ret; u8 *lldpmib; + int ret; /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, ICE_LLDPDU_SIZE, NULL, NULL, NULL); @@ -684,17 +681,17 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, * @cd: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent. In case that this wrapper function - * returns ICE_SUCCESS, caller will need to check if FW returns back the same + * returns 0, caller will need to check if FW returns back the same * value as stated in dcbx_agent_status, and react accordingly. (0x0A09) */ -enum ice_status +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop_start_specific_agent *cmd; - enum ice_status status; struct ice_aq_desc desc; u16 opcode; + int status; cmd = &desc.params.lldp_agent_ctrl; @@ -724,7 +721,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, * * Get CEE DCBX mode operational configuration from firmware (0x0A07) */ -static enum ice_status +static int ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, struct ice_aqc_get_cee_dcb_cfg_resp *buff, struct ice_sq_cd *cd) @@ -749,7 +746,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) { struct ice_aqc_set_query_pfc_mode *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) return -EINVAL; @@ -762,7 +759,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) - return ice_status_to_errno(status); + return status; /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is * disabled, FW will write back 0 to cmd->pfc_mode. 
After the AQ has @@ -903,14 +900,13 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, * * Get IEEE or CEE mode DCB configuration from the Firmware */ -static enum ice_status -ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) +static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) { struct ice_dcbx_cfg *dcbx_cfg = NULL; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; if (dcbx_mode == ICE_DCBX_MODE_IEEE) dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; @@ -943,14 +939,14 @@ out: * * Get DCB configuration from the Firmware */ -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) +int ice_get_dcb_cfg(struct ice_port_info *pi) { struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status ret; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); if (!ret) { @@ -974,13 +970,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) * * Update DCB configuration from the Firmware */ -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret = 0; + int ret = 0; if (!hw->func_caps.common_cap.dcb) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; qos_cfg->is_sw_lldp = true; @@ -996,7 +992,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) return ret; qos_cfg->is_sw_lldp = false; } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) { - return ICE_ERR_NOT_READY; + return -EBUSY; } /* Configure the LLDP MIB change event */ @@ -1016,19 +1012,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) * * Configure (disable/enable) MIB */ -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; - enum ice_status ret; + int ret; if (!hw->func_caps.common_cap.dcb) - return ICE_ERR_NOT_SUPPORTED; + return -EOPNOTSUPP; /* Get DCBX status */ qos_cfg->dcbx_status = ice_get_dcbx_status(hw); if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) - return ICE_ERR_NOT_READY; + return -EBUSY; ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) @@ -1469,16 +1465,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) * * Set DCB configuration to the Firmware */ -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) +int ice_set_dcb_cfg(struct ice_port_info *pi) { u8 mib_type, *lldpmib = NULL; struct ice_dcbx_cfg *dcbcfg; - enum ice_status ret; struct ice_hw *hw; u16 miblen; + int ret; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -1487,7 +1483,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) /* Allocate the LLDPDU */ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); if (!lldpmib) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) @@ -1511,17 +1507,17 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) * * query current port ETS configuration */ -static enum ice_status +static int ice_aq_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_query_port_ets *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!pi) - 
return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.port_ets; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); cmd->port_teid = pi->root->info.node_teid; @@ -1537,18 +1533,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi, * * update the SW DB with the new TC changes */ -static enum ice_status +static int ice_update_port_tc_tree_cfg(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf) { struct ice_sched_node *node, *tc_node; struct ice_aqc_txsched_elem_data elem; - enum ice_status status = 0; u32 teid1, teid2; + int status = 0; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; /* suspend the missing TC nodes */ for (i = 0; i < pi->root->num_children; i++) { teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid); @@ -1605,12 +1601,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi, * query current port ETS configuration and update the * SW DB with the TC changes */ -enum ice_status +int ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_aq_query_port_ets(pi, buf, buf_size, cd); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index 9b6f87a889a6..d73348f279f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -138,28 +138,27 @@ struct ice_cee_app_prio { } __packed; int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); -enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); -enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); -enum ice_status +int ice_get_dcb_cfg(struct ice_port_info *pi); +int ice_set_dcb_cfg(struct ice_port_info *pi); +int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); +int ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cmd_details); #ifdef CONFIG_DCB -enum ice_status +int ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd); -enum ice_status -ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); +int ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); -enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); +int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); #else /* CONFIG_DCB */ -static inline enum ice_status +static inline int ice_aq_stop_lldp(struct ice_hw __always_unused *hw, bool __always_unused shutdown_lldp_agent, bool __always_unused persist, @@ -168,7 +167,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_lldp(struct ice_hw __always_unused *hw, bool __always_unused persist, struct ice_sq_cd __always_unused *cd) @@ -176,7 +175,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw, return 0; } -static inline enum ice_status +static inline int ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, bool __always_unused start_dcbx_agent, bool *dcbx_agent_status, @@ -187,7 +186,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused 
*hw, return 0; } -static inline enum ice_status +static inline int ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, bool __always_unused ena_mib) { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index a72e18320a22..b94d8daeaa58 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -528,7 +528,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) struct ice_aqc_port_ets_elem buf = { 0 }; struct device *dev = ice_pf_to_dev(pf); struct ice_dcbx_cfg *err_cfg; - enum ice_status ret; + int ret; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 7fdeb411b6df..3eb01731e496 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; + if (!bwcfg) + new_cfg->etscfg.tcbwtable[0] = 100; + if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) if (mode == pf->dcbx_cap) return ICE_DCB_NO_HW_CHG; - pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) { - if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) - return ICE_DCB_NO_HW_CHG; + + /* DSCP configuration is not DCBx negotiated */ + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; + + pf->dcbx_cap = mode; + + if (mode & DCB_CAP_DCBX_VER_CEE) qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - } else { + else qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; - } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index b9bd9f9472f6..a230edb38466 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020, Intel Corporation. 
*/ +#include <linux/vmalloc.h> + #include "ice.h" #include "ice_lib.h" #include "ice_devlink.h" @@ -39,13 +41,13 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - enum ice_status status; + int status; status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) /* We failed to locate the PBA, so just skip this entry */ - dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", - ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n", + status); } static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -251,7 +253,6 @@ static int ice_devlink_info_get(struct devlink *devlink, struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_info_ctx *ctx; - enum ice_status status; size_t i; int err; @@ -266,20 +267,19 @@ static int ice_devlink_info_get(struct devlink *devlink, return -ENOMEM; /* discover capabilities first */ - status = ice_discover_dev_caps(hw, &ctx->dev_caps); - if (status) { - dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_discover_dev_caps(hw, &ctx->dev_caps); + if (err) { + dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities"); - err = -EIO; goto out_free_ctx; } if (ctx->dev_caps.common_cap.nvm_update_pending_orom) { - status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); - if (status) { - dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom); + if (err) { + dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_orom = false; @@ -287,10 +287,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) { - status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); - if (status) { - dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm); + if (err) { + dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_nvm = false; @@ -298,10 +298,10 @@ static int ice_devlink_info_get(struct devlink *devlink, } if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) { - status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); - if (status) { - dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n", - ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status)); + err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist); + if (err) { + dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); /* disable display of pending Option ROM */ ctx->dev_caps.common_cap.nvm_update_pending_netlist 
= false; @@ -373,63 +373,225 @@ out_free_ctx: } /** - * ice_devlink_flash_update - Update firmware stored in flash on the device - * @devlink: pointer to devlink associated with device to update - * @params: flash update parameters + * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware + * @devlink: pointer to the devlink instance to reload + * @netns_change: if true, the network namespace is changing + * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE + * @limit: limits on what reload should do, such as not resetting * @extack: netlink extended ACK structure * - * Perform a device flash update. The bulk of the update logic is contained - * within the ice_flash_pldm_image function. + * Allow user to activate new Embedded Management Processor firmware by + * issuing device specific EMP reset. Called in response to + * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE. * - * Returns: zero on success, or an error code on failure. + * Note that teardown and rebuild of the driver state happens automatically as + * part of an interrupt and watchdog task. This is because all physical + * functions on the device must be able to reset when an EMP reset occurs from + * any source. */ static int -ice_devlink_flash_update(struct devlink *devlink, - struct devlink_flash_update_params *params, - struct netlink_ext_ack *extack) +ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - u8 preservation; + u8 pending; int err; - if (!params->overwrite_mask) { - /* preserve all settings and identifiers */ - preservation = ICE_AQC_NVM_PRESERVE_ALL; - } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) { - /* overwrite settings, but preserve the vital device identifiers */ - preservation = ICE_AQC_NVM_PRESERVE_SELECTED; - } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS | - DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) { - /* overwrite both settings and identifiers, preserve nothing */ - preservation = ICE_AQC_NVM_NO_PRESERVATION; - } else { - NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported"); - return -EOPNOTSUPP; + err = ice_get_pending_updates(pf, &pending, extack); + if (err) + return err; + + /* pending is a bitmask of which flash banks have a pending update, + * including the main NVM bank, the Option ROM bank, and the netlist + * bank. If any of these bits are set, then there is a pending update + * waiting to be activated. + */ + if (!pending) { + NL_SET_ERR_MSG_MOD(extack, "No pending firmware update"); + return -ECANCELED; } - if (!hw->dev_caps.common_cap.nvm_unified_update) { - NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); - return -EOPNOTSUPP; + if (pf->fw_emp_reset_disabled) { + NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. 
To activate firmware, a reboot or power cycle is needed"); + return -ECANCELED; } - err = ice_check_for_pending_update(pf, NULL, extack); - if (err) + dev_dbg(dev, "Issuing device EMP reset to activate firmware\n"); + + err = ice_aq_nvm_update_empr(hw); + if (err) { + dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware"); return err; + } - devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0); + return 0; +} - return ice_flash_pldm_image(pf, params->fw, preservation, extack); +/** + * ice_devlink_reload_empr_finish - Wait for EMP reset to finish + * @devlink: pointer to the devlink instance reloading + * @action: the action requested + * @limit: limits imposed by userspace, such as not resetting + * @actions_performed: on return, indicate what actions actually performed + * @extack: netlink extended ACK structure + * + * Wait for driver to finish rebuilding after EMP reset is completed. This + * includes time to wait for both the actual device reset as well as the time + * for the driver's rebuild to complete. + */ +static int +ice_devlink_reload_empr_finish(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + int err; + + *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); + + err = ice_wait_for_reset(pf, 60 * HZ); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute"); + return err; + } + + return 0; } static const struct devlink_ops ice_devlink_ops = { .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), + /* The ice driver currently does not support driver reinit */ + .reload_down = ice_devlink_reload_empr_start, + .reload_up = ice_devlink_reload_empr_finish, .eswitch_mode_get = ice_eswitch_mode_get, .eswitch_mode_set = ice_eswitch_mode_set, .info_get = ice_devlink_info_get, .flash_update = ice_devlink_flash_update, }; +static int +ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + + ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false; + + return 0; +} + +static int +ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + bool roce_ena = ctx->val.vbool; + int ret; + + if (!roce_ena) { + ice_unplug_aux_dev(pf); + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + return 0; + } + + pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; + ret = ice_plug_aux_dev(pf); + if (ret) + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + + return ret; +} + +static int +ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EOPNOTSUPP; + + if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) { + NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. 
This device cannot enable iWARP and RoCEv2 simultaneously"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + + ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP; + + return 0; +} + +static int +ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + bool iw_ena = ctx->val.vbool; + int ret; + + if (!iw_ena) { + ice_unplug_aux_dev(pf); + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + return 0; + } + + pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP; + ret = ice_plug_aux_dev(pf); + if (ret) + pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + + return ret; +} + +static int +ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EOPNOTSUPP; + + if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) { + NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct devlink_param ice_devlink_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_enable_roce_get, + ice_devlink_enable_roce_set, + ice_devlink_enable_roce_validate), + DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_enable_iw_get, + ice_devlink_enable_iw_set, + ice_devlink_enable_iw_validate), + +}; + static void ice_devlink_free(void *devlink_ptr) { devlink_free((struct devlink *)devlink_ptr); @@ -470,6 +632,7 @@ void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); + devlink_set_features(devlink, DEVLINK_F_RELOAD); devlink_register(devlink); } @@ -484,6 +647,36 @@ void ice_devlink_unregister(struct ice_pf *pf) devlink_unregister(priv_to_devlink(pf)); } +int ice_devlink_register_params(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + union devlink_param_value value; + int err; + + err = devlink_params_register(devlink, ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); + if (err) + return err; + + value.vbool = false; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP, + value); + + value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + value); + + return 0; +} + +void ice_devlink_unregister_params(struct ice_pf *pf) +{ + devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); +} + /** * ice_devlink_create_pf_port - Create a devlink port for this PF * @pf: the PF to create a devlink port for @@ -597,16 +790,20 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf) } /** - * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents + * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents * @devlink: the devlink instance * @ops: the devlink region being snapshotted * @extack: extended ACK response structure * @data: on exit points to snapshot data buffer * * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for - * the shadow-ram devlink region. It captures a snapshot of the shadow ram - * contents. 
This snapshot can later be viewed via the devlink-region - * interface. + * the nvm-flash devlink region. It captures a snapshot of the full NVM flash + * contents, including both banks of flash. This snapshot can later be viewed + * via the devlink-region interface. + * + * It captures the flash using the FLASH_ONLY bit set when reading via + * firmware, so it does not read the current Shadow RAM contents. For that, + * use the shadow-ram region. * * @returns zero on success, and updates the data pointer. Returns a non-zero * error code on failure. @@ -618,9 +815,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; void *nvm_data; u32 nvm_size; + int status; nvm_size = hw->flash.flash_size; nvm_data = vzalloc(nvm_size); @@ -633,7 +830,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, status, hw->adminq.sq_last_status); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); vfree(nvm_data); - return -EIO; + return status; } status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false); @@ -643,7 +840,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); ice_release_nvm(hw); vfree(nvm_data); - return -EIO; + return status; } ice_release_nvm(hw); @@ -654,6 +851,66 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, } /** + * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents + * @devlink: the devlink instance + * @ops: the devlink region being snapshotted + * @extack: extended ACK response structure + * @data: on exit points to snapshot data buffer + * + * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for + * the shadow-ram devlink region. It captures a snapshot of the shadow ram + * contents. This snapshot can later be viewed via the devlink-region + * interface. + * + * @returns zero on success, and updates the data pointer. Returns a non-zero + * error code on failure. 
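+ *
+ * Illustrative sketch, not part of this patch: a region snapshot
+ * callback allocates the buffer, fills it, and hands it back through
+ * @data; the devlink core later frees it with the region's .destructor
+ * (vfree here). A hypothetical region (names are placeholders) is
+ * wired up as:
+ *
+ *	static const struct devlink_region_ops foo_region_ops = {
+ *		.name = "foo-data",
+ *		.destructor = vfree,
+ *		.snapshot = foo_snapshot,
+ *	};
+ *
+ *	region = devlink_region_create(devlink, &foo_region_ops,
+ *				       1, FOO_DATA_SIZE);
+ *
+ * After that, "devlink region new pci/.../foo-data" from userspace
+ * triggers the callback.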
+ */ +static int +ice_devlink_sram_snapshot(struct devlink *devlink, + const struct devlink_region_ops __always_unused *ops, + struct netlink_ext_ack *extack, u8 **data) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + u8 *sram_data; + u32 sram_size; + int err; + + sram_size = hw->flash.sr_words * 2u; + sram_data = vzalloc(sram_size); + if (!sram_data) + return -ENOMEM; + + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + err, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + vfree(sram_data); + return err; + } + + /* Read from the Shadow RAM, rather than directly from NVM */ + err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true); + if (err) { + dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", + sram_size, err, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, + "Failed to read Shadow RAM contents"); + ice_release_nvm(hw); + vfree(sram_data); + return err; + } + + ice_release_nvm(hw); + + *data = sram_data; + + return 0; +} + +/** * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities * @devlink: the devlink instance * @ops: the devlink region being snapshotted @@ -675,8 +932,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, struct ice_pf *pf = devlink_priv(devlink); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; void *devcaps; + int status; devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); if (!devcaps) @@ -689,7 +946,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink, status, hw->adminq.sq_last_status); NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities"); vfree(devcaps); - return -EIO; + return status; } *data = (u8 *)devcaps; @@ -703,6 +960,12 @@ static const struct devlink_region_ops ice_nvm_region_ops = { .snapshot = ice_devlink_nvm_snapshot, }; +static const struct devlink_region_ops ice_sram_region_ops = { + .name = "shadow-ram", + .destructor = vfree, + .snapshot = ice_devlink_sram_snapshot, +}; + static const struct devlink_region_ops ice_devcaps_region_ops = { .name = "device-caps", .destructor = vfree, @@ -720,7 +983,7 @@ void ice_devlink_init_regions(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); struct device *dev = ice_pf_to_dev(pf); - u64 nvm_size; + u64 nvm_size, sram_size; nvm_size = pf->hw.flash.flash_size; pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, @@ -731,6 +994,15 @@ void ice_devlink_init_regions(struct ice_pf *pf) pf->nvm_region = NULL; } + sram_size = pf->hw.flash.sr_words * 2u; + pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops, + 1, sram_size); + if (IS_ERR(pf->sram_region)) { + dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", + PTR_ERR(pf->sram_region)); + pf->sram_region = NULL; + } + pf->devcaps_region = devlink_region_create(devlink, &ice_devcaps_region_ops, 10, ICE_AQ_MAX_BUF_LEN); @@ -751,6 +1023,10 @@ void ice_devlink_destroy_regions(struct ice_pf *pf) { if (pf->nvm_region) devlink_region_destroy(pf->nvm_region); + + if (pf->sram_region) + devlink_region_destroy(pf->sram_region); + if (pf->devcaps_region) devlink_region_destroy(pf->devcaps_region); } diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index b7f9551e4fc4..fe006d9946f8 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ 
b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -8,6 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev); void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); +int ice_devlink_register_params(struct ice_pf *pf); +void ice_devlink_unregister_params(struct ice_pf *pf); int ice_devlink_create_pf_port(struct ice_pf *pf); void ice_devlink_destroy_pf_port(struct ice_pf *pf); int ice_devlink_create_vf_port(struct ice_vf *vf); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index d1d7389b0bff..864692b157b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -10,6 +10,100 @@ #include "ice_tc_lib.h" /** + * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC + * @pf: pointer to PF struct + * @vf: pointer to VF struct + * @mac: VF's MAC address + * + * This function adds advanced rule that forwards packets with + * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue. + */ +int +ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac) +{ + struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; + struct ice_adv_rule_info rule_info = { 0 }; + struct ice_adv_lkup_elem *list; + struct ice_hw *hw = &pf->hw; + const u16 lkups_cnt = 1; + int err; + + list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); + if (!list) + return -ENOMEM; + + list[0].type = ICE_MAC_OFOS; + ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac); + eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr); + + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.vsi_handle = ctrl_vsi->idx; + rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; + rule_info.rx = false; + rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + + ctrl_vsi->rxq_map[vf->vf_id]; + rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; + rule_info.flags_info.act_valid = true; + + err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, + vf->repr->mac_rule); + if (err) + dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d", + vf->vf_id); + else + vf->repr->rule_added = true; + + kfree(list); + return err; +} + +/** + * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC + * @vf: pointer to VF struct + * + * This function replays VF's MAC rule after reset. + */ +void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) +{ + int err; + + if (!ice_is_switchdev_running(vf->pf)) + return; + + if (is_valid_ether_addr(vf->hw_lan_addr.addr)) { + err = ice_eswitch_add_vf_mac_rule(vf->pf, vf, + vf->hw_lan_addr.addr); + if (err) { + dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d, error %d\n", + vf->hw_lan_addr.addr, vf->vf_id, err); + return; + } + vf->num_mac++; + + ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr); + } +} + +/** + * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC + * @vf: pointer to the VF struct + * + * Delete the advanced rule that was used to forward packets with the VF's MAC + * address (src MAC) to the corresponding switchdev ctrl VSI queue.
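+ *
+ * Illustrative sketch, not part of this patch (the call sites below are
+ * assumptions): the rule follows the VF's MAC address, so a MAC change
+ * is expected to be bracketed roughly as
+ *
+ *	ice_eswitch_del_vf_mac_rule(vf);
+ *	(program the new address into vf->hw_lan_addr)
+ *	err = ice_eswitch_add_vf_mac_rule(pf, vf, vf->hw_lan_addr.addr);
+ *
+ * while ice_eswitch_replay_vf_mac_rule() re-installs the rule once a VF
+ * reset has wiped it.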
+ */ +void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) +{ + if (!ice_is_switchdev_running(vf->pf)) + return; + + if (!vf->repr->rule_added) + return; + + ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule); + vf->repr->rule_added = false; +} + +/** * ice_eswitch_setup_env - configure switchdev HW filters * @pf: pointer to PF struct * @@ -21,7 +115,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi; struct net_device *uplink_netdev = uplink_vsi->netdev; struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi; - struct ice_port_info *pi = pf->hw.port_info; bool rule_added = false; ice_vsi_manage_vlan_stripping(ctrl_vsi, false); @@ -42,29 +135,17 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) rule_added = true; } - if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX)) - goto err_def_tx; - if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_uplink; if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_control; - if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id, - ICE_FLTR_TX, - ICE_SINGLE_ACT_LB_ENABLE)) - goto err_update_action; - return 0; -err_update_action: - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); err_override_control: ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); err_override_uplink: - ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); -err_def_tx: if (rule_added) ice_clear_dflt_vsi(uplink_vsi->vsw); err_def_rx: @@ -167,21 +248,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf) netif_keep_dst(vf->repr->netdev); } - kfree(ctrl_vsi->target_netdevs); - - ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1, - sizeof(*ctrl_vsi->target_netdevs), - GFP_KERNEL); - if (!ctrl_vsi->target_netdevs) - goto err; - ice_for_each_vf(pf, i) { struct ice_repr *repr = pf->vf[i].repr; struct ice_vsi *vsi = repr->src_vsi; struct metadata_dst *dst; - ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev; - dst = repr->dst; dst->u.port_info.port_id = vsi->vsi_num; dst->u.port_info.lower_dev = repr->netdev; @@ -214,7 +285,6 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi) { int i; - kfree(ctrl_vsi->target_netdevs); ice_for_each_vf(pf, i) { struct ice_vsi *vsi = pf->vf[i].repr->src_vsi; struct ice_vf *vf = &pf->vf[i]; @@ -320,7 +390,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf) ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); - ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX); ice_clear_dflt_vsi(uplink_vsi->vsw); ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, @@ -375,24 +444,6 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf) } /** - * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI - * @vsi: VSI to setup rxdid on - * @rxdid: flex descriptor id - */ -static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid) -{ - struct ice_hw *hw = &vsi->back->hw; - int i; - - ice_for_each_rxq(vsi, i) { - struct ice_rx_ring *ring = vsi->rx_rings[i]; - u16 pf_q = vsi->rxq_map[ring->q_index]; - - ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); - } -} - -/** * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode * @pf: pointer to PF structure */ @@ -425,8 +476,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) ice_eswitch_napi_enable(pf); - 
ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); - return 0; err_setup_reprs: @@ -448,6 +497,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) ice_eswitch_napi_disable(pf); ice_eswitch_release_env(pf); + ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx); ice_eswitch_release_reprs(pf, ctrl_vsi); ice_vsi_release(ctrl_vsi); ice_repr_rem_from_all_vfs(pf); @@ -497,34 +547,6 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, } /** - * ice_eswitch_get_target_netdev - return port representor netdev - * @rx_ring: pointer to Rx ring - * @rx_desc: pointer to Rx descriptor - * - * When working in switchdev mode context (when control VSI is used), this - * function returns netdev of appropriate port representor. For non-switchdev - * context, regular netdev associated with Rx ring is returned. - */ -struct net_device * -ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc) -{ - struct ice_32b_rx_flex_desc_nic_2 *desc; - struct ice_vsi *vsi = rx_ring->vsi; - struct ice_vsi *control_vsi; - u16 target_vsi_id; - - control_vsi = vsi->back->switchdev.control_vsi; - if (vsi != control_vsi) - return rx_ring->netdev; - - desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; - target_vsi_id = le16_to_cpu(desc->src_vsi); - - return vsi->target_netdevs[target_vsi_id]; -} - -/** * ice_eswitch_mode_get - get current eswitch mode * @devlink: pointer to devlink structure * @mode: output parameter for current eswitch mode @@ -648,7 +670,6 @@ int ice_eswitch_rebuild(struct ice_pf *pf) return status; ice_eswitch_napi_enable(pf); - ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2); ice_eswitch_start_all_tx_queues(pf); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index 364cd2a79c37..bd58d9d2e565 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -20,10 +20,11 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); void ice_eswitch_update_repr(struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); - -struct net_device * -ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc); +int +ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, + const u8 *mac); +void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf); +void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf); void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); @@ -33,6 +34,15 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); static inline void ice_eswitch_release(struct ice_pf *pf) { } static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { } +static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { } +static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { } + +static inline int +ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, + const u8 *mac) +{ + return -EOPNOTSUPP; +} static inline void ice_eswitch_set_target_vsi(struct sk_buff *skb, @@ -67,13 +77,6 @@ static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf) return false; } -static inline struct net_device * -ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc) -{ - return rx_ring->netdev; -} - static inline netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c 
b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 572519e402f4..e2e3ef7fba7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -270,9 +270,8 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; - int ret = 0; + int ret; u8 *buf; dev = ice_pf_to_dev(pf); @@ -285,22 +284,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, if (!buf) return -ENOMEM; - status = ice_acquire_nvm(hw, ICE_RES_READ); - if (status) { - dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_acquire_nvm(hw, ICE_RES_READ); + if (ret) { + dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } - status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, - false); - if (status) { - dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf, + false); + if (ret) { + dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto release; } @@ -342,14 +337,14 @@ static bool ice_active_vfs(struct ice_pf *pf) static u64 ice_link_test(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); - enum ice_status status; bool link_up = false; + int status; netdev_info(netdev, "link test\n"); status = ice_get_link_status(np->vsi->port_info, &link_up); if (status) { - netdev_err(netdev, "link query error, status = %s\n", - ice_stat_str(status)); + netdev_err(netdev, "link query error, status = %d\n", + status); return 1; } @@ -1052,8 +1047,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; struct ice_port_info *pi; - enum ice_status status; - int err = 0; + int err; pi = vsi->port_info; @@ -1079,12 +1073,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - caps, NULL); - if (status) { - err = -EAGAIN; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + caps, NULL); + if (err) goto done; - } /* Set supported/configured FEC modes based on PHY capability */ if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC) @@ -1203,7 +1195,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { - enum ice_status status; + int status; /* Disable FW LLDP engine */ status = ice_cfg_lldp_mib_change(&pf->hw, false); @@ -1232,8 +1224,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED; pf->dcbx_cap |= DCB_CAP_DCBX_HOST; } else { - enum ice_status status; bool dcbx_agent_status; + int status; if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) { clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); @@ -1288,8 +1280,10 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) } if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { /* down and up VSI so that changes of Rx cfg are reflected. 
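 *
 * Illustrative note on the guard in the hunk below: test_and_set_bit()
 * atomically sets ICE_VSI_DOWN and returns the bit's previous value, so
 * only the caller that performs the 0 -> 1 transition bounces the VSI:
 *
 *	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
 *		ice_down(vsi);
 *		ice_up(vsi);
 *	}
 *
 * A concurrent or repeated caller sees the bit already set and skips
 * the restart instead of downing an already-down VSI.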
*/ - ice_down(vsi); - ice_up(vsi); + if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { + ice_down(vsi); + ice_up(vsi); + } } /* don't allow modification of this flag when a single VF is in * promiscuous mode because it's not supported @@ -1938,8 +1932,7 @@ ice_get_link_ksettings(struct net_device *netdev, struct ice_aqc_get_phy_caps_data *caps; struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - enum ice_status status; - int err = 0; + int err; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); @@ -1990,12 +1983,10 @@ ice_get_link_ksettings(struct net_device *netdev, if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); + if (err) goto done; - } /* Set the advertised flow control based on the PHY capability */ if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && @@ -2027,12 +2018,10 @@ ice_get_link_ksettings(struct net_device *netdev, caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); - status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(vsi->port_info, false, + ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); + if (err) goto done; - } /* Set supported FEC modes based on PHY capability */ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); @@ -2210,11 +2199,10 @@ ice_set_link_ksettings(struct net_device *netdev, struct ice_pf *pf = np->vsi->back; struct ice_port_info *pi; u8 autoneg_changed = 0; - enum ice_status status; u64 phy_type_high = 0; u64 phy_type_low = 0; - int err = 0; bool linkup; + int err; pi = np->vsi->port_info; @@ -2234,15 +2222,13 @@ ice_set_link_ksettings(struct net_device *netdev, /* Get the PHY capabilities based on media */ if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - phy_caps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + phy_caps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - phy_caps, NULL); - if (status) { - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + phy_caps, NULL); + if (err) goto done; - } /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; @@ -2308,11 +2294,9 @@ ice_set_link_ksettings(struct net_device *netdev, /* Call to get the current link speed */ pi->phy.get_link_info = true; - status = ice_get_link_status(pi, &linkup); - if (status) { - err = -EIO; + err = ice_get_link_status(pi, &linkup); + if (err) goto done; - } curr_link_speed = pi->phy.link_info.link_speed; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); @@ -2381,10 +2365,9 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); - if (status) { + err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); + if (err) { netdev_info(netdev, "Set phy config failed,\n"); - err = -EIO; goto done; } @@ -2522,9 +2505,9 @@ static int ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; u64 hashed_flds; + int status; u32 hdrs; dev = ice_pf_to_dev(pf); @@ -2550,9 +2533,9 @@ ice_set_rss_hash_opt(struct 
ice_vsi *vsi, struct ethtool_rxnfc *nfc) status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs); if (status) { - dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); - return -EINVAL; + dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n", + vsi->vsi_num, status); + return status; } return 0; @@ -2686,7 +2669,9 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, } static void -ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2704,7 +2689,9 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) } static int -ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_tx_ring *xdp_rings = NULL; @@ -2949,7 +2936,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_port_info *pi = np->vsi->port_info; struct ice_aqc_get_phy_caps_data *pcaps; struct ice_dcbx_cfg *dcbx_cfg; - enum ice_status status; + int status; /* Initialize pause params */ pause->rx_pause = 0; @@ -2999,11 +2986,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_vsi *vsi = np->vsi; struct ice_hw *hw = &pf->hw; struct ice_port_info *pi; - enum ice_status status; u8 aq_failures; bool link_up; - int err = 0; u32 is_an; + int err; pi = vsi->port_info; hw_link_info = &pi->phy.link_info; @@ -3029,11 +3015,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { kfree(pcaps); - return -EIO; + return err; } is_an = ice_is_phy_caps_an_enabled(pcaps) ? 
AUTONEG_ENABLE : @@ -3069,22 +3055,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EINVAL; /* Set the FC mode and only restart AN if link is up */ - status = ice_set_fc(pi, &aq_failures, link_up); + err = ice_set_fc(pi, &aq_failures, link_up); if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); err = -EAGAIN; } @@ -3924,16 +3907,16 @@ ice_get_module_info(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 sff8472_comp = 0; u8 sff8472_swap = 0; u8 sff8636_rev = 0; u8 value = 0; + int status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00, 0, &value, 1, 0, NULL); if (status) - return -EIO; + return status; switch (value) { case ICE_MODULE_TYPE_SFP: @@ -3941,12 +3924,12 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_SFF_8472_COMP, 0x00, 0, &sff8472_comp, 1, 0, NULL); if (status) - return -EIO; + return status; status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, ICE_MODULE_SFF_8472_SWAP, 0x00, 0, &sff8472_swap, 1, 0, NULL); if (status) - return -EIO; + return status; if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) { modinfo->type = ETH_MODULE_SFF_8079; @@ -3966,7 +3949,7 @@ ice_get_module_info(struct net_device *netdev, ICE_MODULE_REVISION_ADDR, 0x00, 0, &sff8636_rev, 1, 0, NULL); if (status) - return -EIO; + return status; /* Check revision compliance */ if (sff8636_rev > 0x02) { /* Module is SFF-8636 compliant */ @@ -4001,11 +3984,11 @@ ice_get_module_eeprom(struct net_device *netdev, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; bool is_sfp = false; unsigned int i, j; u16 offset = 0; u8 page = 0; + int status; if (!ee || !ee->len || !data) return -EINVAL; @@ -4013,7 +3996,7 @@ ice_get_module_eeprom(struct net_device *netdev, status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0, NULL); if (status) - return -EIO; + return status; if (value[0] == ICE_MODULE_TYPE_SFP) is_sfp = true; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 38960bcc384c..5d10c4f84a36 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -5,6 +5,7 @@ #include "ice.h" #include "ice_lib.h" +#include "ice_fdir.h" #include "ice_flow.h" static struct in6_addr full_ipv6_addr_mask = { @@ -205,7 +206,7 @@ int 
ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd) if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) fsp->ring_cookie = RX_CLS_FLOW_DISC; else - fsp->ring_cookie = rule->q_index; + fsp->ring_cookie = rule->orig_q_index; idx = ice_ethtool_flow_to_fltr(fsp->flow_type); if (idx == ICE_FLTR_PTYPE_NONF_NONE) { @@ -257,6 +258,80 @@ release_lock: } /** + * ice_fdir_remap_entries - update the FDir entries in profile + * @prof: FDir structure pointer + * @tun: tunneled or non-tunneled packet + * @idx: FDir entry index + */ +static void +ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx) +{ + if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) { + int i; + + for (i = idx; i < (prof->cnt - 1); i++) { + u64 old_entry_h; + + old_entry_h = prof->entry_h[i + 1][tun]; + prof->entry_h[i][tun] = old_entry_h; + prof->vsi_h[i] = prof->vsi_h[i + 1]; + } + + prof->entry_h[i][tun] = 0; + prof->vsi_h[i] = 0; + } +} + +/** + * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules + * @hw: hardware structure containing filter list + * @vsi_idx: VSI handle + */ +void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx) +{ + int status, flow; + + if (!hw->fdir_prof) + return; + + for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { + struct ice_fd_hw_prof *prof = hw->fdir_prof[flow]; + int tun, i; + + if (!prof || !prof->cnt) + continue; + + for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { + u64 prof_id; + + prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; + + for (i = 0; i < prof->cnt; i++) { + if (prof->vsi_h[i] != vsi_idx) + continue; + + prof->entry_h[i][tun] = 0; + prof->vsi_h[i] = 0; + break; + } + + /* after clearing FDir entries update the remaining */ + ice_fdir_remap_entries(prof, tun, i); + + /* find flow profile corresponding to prof_id and clear + * vsi_idx from bitmap. + */ + status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id); + if (status) { + dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n", + status); + } + } + prof->cnt--; + } +} + +/** * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow * @hw: hardware structure containing the filter list * @blk: hardware block @@ -514,6 +589,28 @@ ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow) } /** + * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure + * @prof: pointer to flow director HW profile + * @vsi_idx: vsi_idx to locate + * + * Return the index of the vsi_idx. If vsi_idx is not found, insert it + * into the vsi_h table. + */ +static u16 +ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx) +{ + u16 idx = 0; + + for (idx = 0; idx < prof->cnt; idx++) + if (prof->vsi_h[idx] == vsi_idx) + return idx; + + if (idx == prof->cnt) + prof->vsi_h[prof->cnt++] = vsi_idx; + return idx; +} + +/** * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule * @pf: pointer to the PF structure * @seg: protocol header description pointer @@ -530,11 +627,12 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *hw_prof; struct ice_hw *hw = &pf->hw; - enum ice_status status; u64 entry1_h = 0; u64 entry2_h = 0; + bool del_last; u64 prof_id; int err; + int idx; main_vsi = ice_get_main_vsi(pf); if (!main_vsi) @@ -581,24 +679,20 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, * actions (NULL) and zero actions 0.
*/ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - TNL_SEG_CNT(tun), &prof); - if (status) - return ice_status_to_errno(status); - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - main_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry1_h); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + TNL_SEG_CNT(tun), &prof); + if (err) + return err; + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + main_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); + if (err) goto err_prof; - } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry2_h); - if (status) { - err = ice_status_to_errno(status); + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); + if (err) goto err_entry; - } hw_prof->fdir_seg[tun] = seg; hw_prof->entry_h[0][tun] = entry1_h; @@ -608,8 +702,60 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg, if (!hw_prof->cnt) hw_prof->cnt = 2; + for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) { + u16 vsi_idx; + u16 vsi_h; + + if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx]) + continue; + + entry1_h = 0; + vsi_h = main_vsi->tc_map_vsi[idx]->idx; + err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + main_vsi->idx, vsi_h, + ICE_FLOW_PRIO_NORMAL, seg, + &entry1_h); + if (err) { + dev_err(dev, "Could not add Channel VSI %d to flow group\n", + idx); + goto err_unroll; + } + + vsi_idx = ice_fdir_prof_vsi_idx(hw_prof, + main_vsi->tc_map_vsi[idx]->idx); + hw_prof->entry_h[vsi_idx][tun] = entry1_h; + } + return 0; +err_unroll: + entry1_h = 0; + hw_prof->fdir_seg[tun] = NULL; + + /* The variable del_last will be used to determine when to clean up + * the VSI group data. The VSI data is not needed if there are no + * segments. 
+ */ + del_last = true; + for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++) + if (hw_prof->fdir_seg[idx]) { + del_last = false; + break; + } + + for (idx = 0; idx < hw_prof->cnt; idx++) { + u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]); + + if (!hw_prof->entry_h[idx][tun]) + continue; + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]); + hw_prof->entry_h[idx][tun] = 0; + if (del_last) + hw_prof->vsi_h[idx] = 0; + } + if (del_last) + hw_prof->cnt = 0; err_entry: ice_rem_prof_id_flow(hw, ICE_BLK_FD, ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id); @@ -1174,6 +1320,31 @@ err_exit: } /** + * ice_update_per_q_fltr + * @vsi: ptr to VSI + * @q_index: queue index + * @inc: true to increment or false to decrement per queue filter count + * + * This function is used to keep track of per queue sideband filters + */ +static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc) +{ + struct ice_rx_ring *rx_ring; + + if (!vsi->num_rxq || q_index >= vsi->num_rxq) + return; + + rx_ring = vsi->rx_rings[q_index]; + if (!rx_ring || !rx_ring->ch) + return; + + if (inc) + atomic_inc(&rx_ring->ch->num_sb_fltr); + else + atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr); +} + +/** * ice_fdir_write_fltr - send a flow director filter to the hardware * @pf: PF data structure * @input: filter structure @@ -1190,7 +1361,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, struct ice_hw *hw = &pf->hw; struct ice_fltr_desc desc; struct ice_vsi *ctrl_vsi; - enum ice_status status; u8 *pkt, *frag_pkt; bool has_frag; int err; @@ -1209,11 +1379,9 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, } ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - if (status) { - err = ice_status_to_errno(status); + err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + if (err) goto err_free_all; - } err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); if (err) goto err_free_all; @@ -1223,12 +1391,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, if (has_frag) { /* does not return error */ ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, - is_tun); - if (status) { - err = ice_status_to_errno(status); + err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true, + is_tun); + if (err) goto err_frag; - } err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt); if (err) goto err_frag; @@ -1268,7 +1434,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool is_tun = tun == ICE_FD_HW_SEG_TUN; int err; - if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num)) + if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL)) continue; err = ice_fdir_write_fltr(pf, input, add, is_tun); if (err) @@ -1324,13 +1490,32 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf) } /** + * ice_fdir_del_all_fltrs - Delete all flow director filters + * @vsi: the VSI being changed + * + * This function needs to be called while holding hw->fdir_fltr_lock + */ +void ice_fdir_del_all_fltrs(struct ice_vsi *vsi) +{ + struct ice_fdir_fltr *f_rule, *tmp; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + + list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) { + ice_fdir_write_all_fltr(pf, f_rule, false); + ice_fdir_update_cntrs(hw, f_rule->flow_type, false); + 
list_del(&f_rule->fltr_node); + devm_kfree(ice_pf_to_dev(pf), f_rule); + } +} + +/** * ice_vsi_manage_fdir - turn on/off flow director * @vsi: the VSI being changed * @ena: boolean value indicating if this is an enable or disable request */ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena) { - struct ice_fdir_fltr *f_rule, *tmp; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_fltr_ptype flow; @@ -1344,13 +1529,8 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena) mutex_lock(&hw->fdir_fltr_lock); if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags)) goto release_lock; - list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) { - /* ignore return value */ - ice_fdir_write_all_fltr(pf, f_rule, false); - ice_fdir_update_cntrs(hw, f_rule->flow_type, false); - list_del(&f_rule->fltr_node); - devm_kfree(ice_hw_to_dev(hw), f_rule); - } + + ice_fdir_del_all_fltrs(vsi); if (hw->fdir_prof) for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; @@ -1401,18 +1581,25 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input, { struct ice_fdir_fltr *old_fltr; struct ice_hw *hw = &pf->hw; + struct ice_vsi *vsi; int err = -ENOENT; /* Do not update filters during reset */ if (ice_is_reset_in_progress(pf->state)) return -EBUSY; + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EINVAL; + old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx); if (old_fltr) { err = ice_fdir_write_all_fltr(pf, old_fltr, false); if (err) return err; ice_fdir_update_cntrs(hw, old_fltr->flow_type, false); + /* update sb-filters count, specific to ring->channel */ + ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false); if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type]) /* we just deleted the last filter of flow_type so we * should also delete the HW filter info. 
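 *
 * Illustrative note on the per-queue bookkeeping above: the channel's
 * num_sb_fltr counter is an atomic_t, bumped with atomic_inc() when a
 * sideband filter is added and dropped with atomic_dec_if_positive()
 * when one is removed. atomic_dec_if_positive() only stores the new
 * value when the old one is positive (it returns old - 1 either way),
 * so an unbalanced delete cannot underflow the counter:
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *	atomic_dec_if_positive(&cnt);	(returns -1, cnt stays 0)
 *	atomic_inc(&cnt);		(cnt is now 1)
 *	atomic_dec_if_positive(&cnt);	(returns 0, cnt back to 0)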
@@ -1424,6 +1611,8 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input, if (!input) return err; ice_fdir_list_add_fltr(hw, input); + /* update sb-filters count, specific to ring->channel */ + ice_update_per_q_fltr(vsi, input->orig_q_index, true); ice_fdir_update_cntrs(hw, input->flow_type, true); return 0; } @@ -1463,6 +1652,39 @@ int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) } /** + * ice_update_ring_dest_vsi - update dest ring and dest VSI + * @vsi: pointer to target VSI + * @dest_vsi: ptr to dest VSI index + * @ring: ptr to dest ring + * + * This function updates destination VSI and queue if user specifies + * target queue which falls in channel's (aka ADQ) queue region + */ +static void +ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring) +{ + struct ice_channel *ch; + + list_for_each_entry(ch, &vsi->ch_list, list) { + if (!ch->ch_vsi) + continue; + + /* make sure to locate corresponding channel based on "queue" + * specified + */ + if ((*ring < ch->base_q) || + (*ring >= (ch->base_q + ch->num_rxq))) + continue; + + /* update the dest_vsi based on channel */ + *dest_vsi = ch->ch_vsi->idx; + + /* update the "ring" to be correct based on channel */ + *ring -= ch->base_q; + } +} + +/** * ice_set_fdir_input_set - Set the input set for Flow Director * @vsi: pointer to target VSI * @fsp: pointer to ethtool Rx flow specification @@ -1473,6 +1695,7 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, struct ice_fdir_fltr *input) { u16 dest_vsi, q_index = 0; + u16 orig_q_index = 0; struct ice_pf *pf; struct ice_hw *hw; int flow_type; @@ -1499,6 +1722,8 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, if (ring >= vsi->num_rxq) return -EINVAL; + orig_q_index = ring; + ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring); dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; q_index = ring; } @@ -1507,6 +1732,11 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, input->q_index = q_index; flow_type = fsp->flow_type & ~FLOW_EXT; + /* Record the original queue index as specified by user. + * with channel configuration 'q_index' becomes relative + * to TC (channel). + */ + input->orig_q_index = orig_q_index; input->dest_vsi = dest_vsi; input->dest_ctl = dest_ctl; input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID; @@ -1652,7 +1882,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) } /* return error if not an update and no available filters */ - fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1; + fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1; if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) && ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) { dev_err(dev, "Failed to add filter. 
The maximum number of flow director filters has been reached.\n"); @@ -1694,6 +1924,8 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) remove_sw_rule: ice_fdir_update_cntrs(hw, input->flow_type, false); + /* update sb-filters count, specific to ring->channel */ + ice_update_per_q_fltr(vsi, input->orig_q_index, false); list_del(&input->fltr_node); release_lock: mutex_unlock(&hw->fdir_fltr_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index cbd8424631e3..ae089d32ee9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -712,7 +712,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, * @hw: pointer to the hardware structure * @cntr_id: returns counter index */ -enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -723,7 +723,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id) * @hw: pointer to the hardware structure * @cntr_id: counter index to be freed */ -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id); @@ -735,8 +735,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status -ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES, ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, @@ -749,8 +748,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) * @cntr_id: returns counter index * @num_fltr: number of filter entries to be allocated */ -enum ice_status -ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES, ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr, @@ -872,7 +870,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr) * @frag: generate a fragment packet * @tun: true implies generate a tunnel packet */ -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun) { @@ -919,15 +917,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, if (ice_fdir_pkt[idx].flow == flow) break; if (idx == ICE_FDIR_NUM_PKT) - return ICE_ERR_PARAM; + return -EINVAL; if (!tun) { memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len); loc = pkt; } else { - if (!ice_get_open_tunnel_port(hw, &tnl_port)) - return ICE_ERR_DOES_NOT_EXIST; + if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL)) + return -ENOENT; if (!ice_fdir_pkt[idx].tun_pkt) - return ICE_ERR_PARAM; + return -EINVAL; memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, ice_fdir_pkt[idx].tun_pkt_len); ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET, @@ -1111,7 +1109,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); break; default: - return ICE_ERR_PARAM; + 
return -EINVAL; } if (input->flex_fltr) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index da4163856f4c..1b9b84490689 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -182,6 +182,7 @@ struct ice_fdir_fltr { /* filter control */ u16 q_index; + u16 orig_q_index; u16 dest_vsi; u8 dest_ctl; u8 cnt_ena; @@ -201,16 +202,14 @@ struct ice_fdir_base_pkt { const u8 *tun_pkt; }; -enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); -enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); -enum ice_status -ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); -enum ice_status -ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); +int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); +int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); +int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); void ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input, struct ice_fltr_desc *fdesc, bool add); -enum ice_status +int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun); int ice_get_fdir_cnt_all(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 23cfcceb1536..4deb2c9446ec 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -314,6 +314,78 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, } /** + * ice_hw_ptype_ena - check if the PTYPE is enabled or not + * @hw: pointer to the HW structure + * @ptype: the hardware PTYPE + */ +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype) +{ + return ptype < ICE_FLOW_PTYPE_MAX && + test_bit(ptype, hw->hw_ptype); +} + +/** + * ice_marker_ptype_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the Marker PType TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual Marker PType TCAM entries. 
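+ *
+ * Illustrative restatement of the enumeration contract, not new driver
+ * API: ice_pkg_enum_entry() keeps invoking the handler with an
+ * increasing @index until the handler returns NULL, so one section type
+ * is drained with a loop of the form
+ *
+ *	do {
+ *		entry = ice_pkg_enum_entry(seg, &state, sect_type, NULL,
+ *					   handler);
+ *		if (entry)
+ *			(consume the entry)
+ *		seg = NULL;	(pass the segment only on the first call)
+ *	} while (entry);
+ *
+ * which is exactly the shape of ice_fill_hw_ptype() below.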
+ */ +static void * +ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_marker_ptype_tcam_section *marker_ptype; + + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE) + return NULL; + + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + marker_ptype = section; + if (index >= le16_to_cpu(marker_ptype->count)) + return NULL; + + return marker_ptype->tcam + index; +} + +/** + * ice_fill_hw_ptype - fill the enabled PTYPE bit information + * @hw: pointer to the HW structure + */ +static void ice_fill_hw_ptype(struct ice_hw *hw) +{ + struct ice_marker_ptype_tcam_entry *tcam; + struct ice_seg *seg = hw->seg; + struct ice_pkg_enum state; + + bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX); + if (!seg) + return; + + memset(&state, 0, sizeof(state)); + + do { + tcam = ice_pkg_enum_entry(seg, &state, + ICE_SID_RXPARSER_MARKER_PTYPE, NULL, + ice_marker_ptype_tcam_handler); + if (tcam && + le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX && + le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX) + set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype); + + seg = NULL; + } while (tcam); +} + +/** * ice_boost_tcam_handler * @sect_type: section type * @section: pointer to section @@ -358,7 +430,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) * if it is found. The ice_seg parameter must not be NULL since the first call * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. */ -static enum ice_status +static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, struct ice_boost_tcam_entry **entry) { @@ -368,7 +440,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, memset(&state, 0, sizeof(state)); if (!ice_seg) - return ICE_ERR_PARAM; + return -EINVAL; do { tcam = ice_pkg_enum_entry(ice_seg, &state, @@ -383,7 +455,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, } while (tcam); *entry = NULL; - return ICE_ERR_CFG; + return -EIO; } /** @@ -549,7 +621,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) * ------------------------------ * Result: key: b01 10 11 11 00 00 */ -static enum ice_status +static int ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, u8 *key_inv) { @@ -558,7 +630,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) - return ICE_ERR_CFG; + return -EIO; *key = 0; *key_inv = 0; @@ -651,7 +723,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ -static enum ice_status +static int ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, u16 len) { @@ -660,11 +732,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, /* size must be a multiple of 2 bytes. */ if (size % 2) - return ICE_ERR_CFG; + return -EIO; half_size = size / 2; if (off + len > half_size) - return ICE_ERR_CFG; + return -EIO; /* Make sure at most one bit is set in the never match mask. 
Having more * than one never match mask bit set will cause HW to consume excessive @@ -672,13 +744,13 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, */ #define ICE_NVR_MTCH_BITS_MAX 1 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) - return ICE_ERR_CFG; + return -EIO; for (i = 0; i < len; i++) if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, dc ? dc[i] : 0, nm ? nm[i] : 0, key + off + i, key + half_size + off + i)) - return ICE_ERR_CFG; + return -EIO; return 0; } @@ -692,25 +764,25 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, * or writing of the package. When attempting to obtain write access, the * caller must check for the following two return values: * - * ICE_SUCCESS - Means the caller has acquired the global config lock - * and can perform writing of the package. - * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. - */ -static enum ice_status + * 0 - Means the caller has acquired the global config lock + * and can perform writing of the package. + * -EALREADY - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. + */ +static int ice_acquire_global_cfg_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { - enum ice_status status; + int status; status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, ICE_GLOBAL_CFG_LOCK_TIMEOUT); if (!status) mutex_lock(&ice_global_cfg_lock_sw); - else if (status == ICE_ERR_AQ_NO_WORK) + else if (status == -EALREADY) ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); return status; @@ -735,7 +807,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw) * * This function will request ownership of the change lock. 
*/ -enum ice_status +int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, @@ -765,14 +837,14 @@ void ice_release_change_lock(struct ice_hw *hw) * * Download Package (0x0C40) */ -static enum ice_status +static int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (error_offset) *error_offset = 0; @@ -787,7 +859,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; @@ -813,14 +885,14 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, * * Update Package (0x0C42) */ -static enum ice_status +static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (error_offset) *error_offset = 0; @@ -835,7 +907,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; @@ -892,11 +964,10 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, * * Obtains change lock and updates package. */ -static enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; u32 offset, info, i; + int status; status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) @@ -921,6 +992,22 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) return status; } +static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_FILE_REVISION_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_LOAD_ERROR; + default: + return ICE_DDP_PKG_ERR; + } +} + /** * ice_dwnld_cfg_bufs * @hw: pointer to the hardware structure @@ -931,15 +1018,17 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * to the firmware. Metadata buffers are skipped, and the first metadata buffer * found indicates that the rest of the buffers are all metadata buffers. 
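 *
 * Illustrative distillation of the lock contract documented above:
 * callers treat -EALREADY from ice_acquire_global_cfg_lock() as
 * "another PF already loaded the package", not as a failure:
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == -EALREADY)
 *		return ICE_DDP_PKG_ALREADY_LOADED;
 *	if (status)
 *		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
 *	(...download the buffers, then...)
 *	ice_release_global_cfg_lock(hw);
 *
 * which is the shape of the function that follows.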
*/ -static enum ice_status +static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_buf_hdr *bh; + enum ice_aq_err err; u32 offset, info, i; + int status; if (!bufs || !count) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; /* If the first buffer's first section has its metadata bit set * then there are no buffers to be downloaded, and the operation is @@ -947,20 +1036,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) */ bh = (struct ice_buf_hdr *)bufs; if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) - return 0; - - /* reset pkg_dwnld_status in case this function is called in the - * reset/rebuild flow - */ - hw->pkg_dwnld_status = ICE_AQ_RC_OK; + return ICE_DDP_PKG_SUCCESS; status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); if (status) { - if (status == ICE_ERR_AQ_NO_WORK) - hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; - else - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - return status; + if (status == -EALREADY) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); } for (i = 0; i < count; i++) { @@ -986,11 +1068,11 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) &offset, &info, NULL); /* Save AQ status from download package */ - hw->pkg_dwnld_status = hw->adminq.sq_last_status; if (status) { ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); - + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); break; } @@ -1000,7 +1082,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) ice_release_global_cfg_lock(hw); - return status; + return state; } /** @@ -1012,7 +1094,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * * Get Package Info List (0x0C43) */ -static enum ice_status +static int ice_aq_get_pkg_info_list(struct ice_hw *hw, struct ice_aqc_get_pkg_info_resp *pkg_info, u16 buf_size, struct ice_sq_cd *cd) @@ -1031,7 +1113,7 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw, * * Handles the download of a complete package. */ -static enum ice_status +static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; @@ -1062,13 +1144,13 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) * * Saves off the package details into the HW structure. 
*/ -static enum ice_status +static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { struct ice_generic_seg_hdr *seg_hdr; if (!pkg_hdr) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); if (seg_hdr) { @@ -1082,7 +1164,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) ICE_SID_METADATA); if (!meta) { ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } hw->pkg_ver = meta->ver; @@ -1104,10 +1186,10 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) seg_hdr->seg_id); } else { ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1116,21 +1198,22 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) * * Store details of the package currently loaded in HW into the HW structure. */ -static enum ice_status ice_get_pkg_info(struct ice_hw *hw) +static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) { + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; struct ice_aqc_get_pkg_info_resp *pkg_info; - enum ice_status status; u16 size; u32 i; size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); pkg_info = kzalloc(size, GFP_KERNEL); if (!pkg_info) - return ICE_ERR_NO_MEMORY; + return ICE_DDP_PKG_ERR; - status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); - if (status) + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; goto init_pkg_free_alloc; + } for (i = 0; i < le32_to_cpu(pkg_info->count); i++) { #define ICE_PKG_FLAG_COUNT 4 @@ -1165,7 +1248,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw) init_pkg_free_alloc: kfree(pkg_info); - return status; + return state; } /** @@ -1176,28 +1259,28 @@ init_pkg_free_alloc: * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. 
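 *
 * Every failure mode collapses into ICE_DDP_PKG_INVALID_FILE. The checks
 * follow the standard pattern for untrusted length-prefixed files, as a
 * condensed sketch of the body below:
 *
 *	if (len < struct_size(pkg, seg_offset, seg_count))
 *		return ICE_DDP_PKG_INVALID_FILE;  /* segment table must fit */
 *	for (i = 0; i < seg_count; i++) {
 *		u32 off = le32_to_cpu(pkg->seg_offset[i]);
 *
 *		if (len < off + sizeof(*seg))     /* segment header fits */
 *			return ICE_DDP_PKG_INVALID_FILE;
 *		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
 *		if (len < off + le32_to_cpu(seg->seg_size))
 *			return ICE_DDP_PKG_INVALID_FILE;
 *	}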
*/ -static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; if (len < struct_size(pkg, seg_offset, 1)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; /* pkg must have at least one segment */ seg_count = le32_to_cpu(pkg->seg_count); if (seg_count < 1) - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; /* make sure segment array fits in package length */ if (len < struct_size(pkg, seg_offset, seg_count)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; /* all segments must fit within length */ for (i = 0; i < seg_count; i++) { @@ -1206,16 +1289,16 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) /* segment header must fit */ if (len < off + sizeof(*seg)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); /* segment body must fit */ if (len < off + le32_to_cpu(seg->seg_size)) - return ICE_ERR_BUF_TOO_SHORT; + return ICE_DDP_PKG_INVALID_FILE; } - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1259,13 +1342,18 @@ static void ice_init_pkg_regs(struct ice_hw *hw) * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR * definitions. */ -static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) { - if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || - pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) - return ICE_ERR_NOT_SUPPORTED; + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; - return 0; + return ICE_DDP_PKG_SUCCESS; } /** @@ -1276,20 +1364,20 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) * * This function checks the package version compatibility with driver and NVM */ -static enum ice_status +static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, struct ice_seg **seg) { struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_status status; + enum ice_ddp_state state; u16 size; u32 i; /* Check package version compatibility */ - status = ice_chk_pkg_version(&hw->pkg_ver); - if (status) { + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return status; + return state; } /* find ICE segment in given package */ @@ -1297,18 +1385,19 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_ERR_CFG; + return ICE_DDP_PKG_INVALID_FILE; } /* Check if FW is compatible with the OS package */ size = struct_size(pkg, pkg_info, ICE_PKG_CNT); pkg = kzalloc(size, GFP_KERNEL); if (!pkg) - return ICE_ERR_NO_MEMORY; + return ICE_DDP_PKG_ERR; - status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); - if (status) + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { 
+ state = ICE_DDP_PKG_LOAD_ERROR; goto fw_ddp_compat_free_alloc; + } for (i = 0; i < le32_to_cpu(pkg->count); i++) { /* loop till we find the NVM package */ @@ -1318,7 +1407,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, pkg->pkg_info[i].ver.major || (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { - status = ICE_ERR_FW_DDP_MISMATCH; + state = ICE_DDP_PKG_FW_MISMATCH; ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ @@ -1326,7 +1415,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, } fw_ddp_compat_free_alloc: kfree(pkg); - return status; + return state; } /** @@ -1367,7 +1456,7 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) * and store the index number in struct ice_switch_info *switch_info * in HW for following use. */ -static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) +static int ice_get_prof_index_max(struct ice_hw *hw) { u16 prof_index = 0, j, max_prof_index = 0; struct ice_pkg_enum state; @@ -1379,7 +1468,7 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) memset(&state, 0, sizeof(state)); if (!hw->seg) - return ICE_ERR_PARAM; + return -EINVAL; ice_seg = hw->seg; @@ -1410,6 +1499,34 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) } /** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + */ +static enum ice_ddp_state +ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure * @buf: pointer to the package buffer @@ -1434,53 +1551,54 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw) * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this * case. 
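 *
 * A request_firmware()-based caller would therefore look roughly like
 * the following (hypothetical sketch; "fw" and "dev" are stand-ins, not
 * names from this patch):
 *
 *	enum ice_ddp_state state;
 *
 *	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
 *	if (!ice_is_init_pkg_successful(state))
 *		dev_err(dev, "DDP package failed to load, state %d\n", state);
 *	release_firmware(fw);  /* safe: ice_copy_and_init_pkg() keeps a copy */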
*/ -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) { + bool already_loaded = false; + enum ice_ddp_state state; struct ice_pkg_hdr *pkg; - enum ice_status status; struct ice_seg *seg; if (!buf || !len) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; pkg = (struct ice_pkg_hdr *)buf; - status = ice_verify_pkg(pkg, len); - if (status) { + state = ice_verify_pkg(pkg, len); + if (state) { ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - status); - return status; + state); + return state; } /* initialize package info */ - status = ice_init_pkg_info(hw, pkg); - if (status) - return status; + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; /* before downloading the package, check package version for * compatibility with driver */ - status = ice_chk_pkg_compat(hw, pkg, &seg); - if (status) - return status; + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; /* initialize package hints and then download package */ ice_init_pkg_hints(hw, seg); - status = ice_download_pkg(hw, seg); - if (status == ICE_ERR_AQ_NO_WORK) { + state = ice_download_pkg(hw, seg); + if (state == ICE_DDP_PKG_ALREADY_LOADED) { ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - status = 0; + already_loaded = true; } /* Get information on the package currently loaded in HW, then make sure * the driver is compatible with this version. */ - if (!status) { - status = ice_get_pkg_info(hw); - if (!status) - status = ice_chk_pkg_version(&hw->active_pkg_ver); + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); } - if (!status) { + if (ice_is_init_pkg_successful(state)) { hw->seg = seg; /* on successful package download update other required * registers to support the package and fill HW tables @@ -1488,13 +1606,14 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) */ ice_init_pkg_regs(hw); ice_fill_blk_tbls(hw); + ice_fill_hw_ptype(hw); ice_get_prof_index_max(hw); } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - status); + state); } - return status; + return state; } /** @@ -1520,18 +1639,19 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) * package buffer, as the new copy will be managed by this function and * related routines. 
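 *
 * The ownership rule reads straight off the body: keep the devm copy on
 * success, free it on failure (sketch; a NULL from devm_kmemdup() is
 * caught by ice_init_pkg()'s !buf check and lands in the failure path):
 *
 *	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
 *	state = ice_init_pkg(hw, buf_copy, len);
 *	if (!ice_is_init_pkg_successful(state))
 *		devm_kfree(ice_hw_to_dev(hw), buf_copy);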
*/ -enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) { - enum ice_status status; + enum ice_ddp_state state; u8 *buf_copy; if (!buf || !len) - return ICE_ERR_PARAM; + return ICE_DDP_PKG_ERR; buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); - status = ice_init_pkg(hw, buf_copy, len); - if (status) { + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { /* Free the copy, since we failed to initialize the package */ devm_kfree(ice_hw_to_dev(hw), buf_copy); } else { @@ -1540,7 +1660,23 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) hw->pkg_size = len; } - return status; + return state; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } } /** @@ -1644,7 +1780,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, * NOTE: The caller of the function is responsible for freeing the memory * allocated for every list entry. */ -enum ice_status +int ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, unsigned long *bm, struct list_head *fv_list) { @@ -1658,7 +1794,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, memset(&state, 0, sizeof(state)); if (!ids_cnt || !hw->seg) - return ICE_ERR_PARAM; + return -EINVAL; ice_seg = hw->seg; do { @@ -1702,7 +1838,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, } } while (fv); if (list_empty(fv_list)) - return ICE_ERR_CFG; + return -EIO; return 0; err: @@ -1711,7 +1847,7 @@ err: devm_kfree(ice_hw_to_dev(hw), fvl); } - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -1779,7 +1915,7 @@ static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) * result in some wasted space in the buffer. * Note: all package contents must be in Little Endian form. 
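 *
 * For example, growing the section table by @count entries keeps every
 * on-buffer field little-endian (condensed sketch of the body; the size
 * helper name is assumed from current kernel sources):
 *
 *	bld->reserved_section_table_entries += count;
 *	data_end = le16_to_cpu(buf->data_end) +
 *		   flex_array_size(buf, section_entry, count);
 *	buf->data_end = cpu_to_le16(data_end);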
*/ -static enum ice_status +static int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) { struct ice_buf_hdr *buf; @@ -1787,17 +1923,17 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) u16 data_end; if (!bld) - return ICE_ERR_PARAM; + return -EINVAL; buf = (struct ice_buf_hdr *)&bld->buf; /* already an active section, can't increase table size */ section_count = le16_to_cpu(buf->section_count); if (section_count > 0) - return ICE_ERR_CFG; + return -EIO; if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return ICE_ERR_CFG; + return -EIO; bld->reserved_section_table_entries += count; data_end = le16_to_cpu(buf->data_end) + @@ -1899,9 +2035,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @port: returns open port + * @type: type of tunnel, can be TNL_LAST if it doesn't matter */ bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type) { bool res = false; u16 i; @@ -1909,7 +2047,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port) mutex_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) - if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) { + if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port && + (type == TNL_LAST || type == hw->tnl.tbl[i].type)) { *port = hw->tnl.tbl[i].port; res = true; break; @@ -1956,19 +2095,19 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type, * creating a package buffer with the tunnel info and issuing an update package * command. */ -static enum ice_status +static int ice_create_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; mutex_lock(&hw->tnl_lock); bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_tunnel_end; } @@ -2027,26 +2166,26 @@ ice_create_tunnel_end: * targeting the specific updates requested and then performing an update * package. */ -static enum ice_status +static int ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; - enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; + int status = -ENOSPC; mutex_lock(&hw->tnl_lock); if (WARN_ON(!hw->tnl.tbl[index].valid || hw->tnl.tbl[index].type != type || hw->tnl.tbl[index].port != port)) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto ice_destroy_tunnel_end; } bld = ice_pkg_buf_alloc(hw); if (!bld) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_destroy_tunnel_end; } @@ -2094,7 +2233,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; enum ice_tunnel_type tnl_type; - enum ice_status status; + int status; u16 index; tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? 
TNL_VXLAN : TNL_GENEVE; @@ -2102,8 +2241,8 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port)); if (status) { - netdev_err(netdev, "Error adding UDP tunnel - %s\n", - ice_stat_str(status)); + netdev_err(netdev, "Error adding UDP tunnel - %d\n", + status); return -EIO; } @@ -2118,15 +2257,15 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; enum ice_tunnel_type tnl_type; - enum ice_status status; + int status; tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE; status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type, ntohs(ti->port)); if (status) { - netdev_err(netdev, "Error removing UDP tunnel - %s\n", - ice_stat_str(status)); + netdev_err(netdev, "Error removing UDP tunnel - %d\n", + status); return -EIO; } @@ -2142,17 +2281,17 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, * @prot: variable to receive the protocol ID * @off: variable to receive the protocol offset */ -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off) { struct ice_fv_word *fv_ext; if (prof >= hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (fv_idx >= hw->blk[blk].es.fvw) - return ICE_ERR_PARAM; + return -EINVAL; fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); @@ -2175,11 +2314,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, * PTG ID that contains it through the PTG parameter, with the value of * ICE_DEFAULT_PTG (0) meaning it is part the default PTG. */ -static enum ice_status +static int ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) { if (ptype >= ICE_XLT1_CNT || !ptg) - return ICE_ERR_PARAM; + return -EINVAL; *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; return 0; @@ -2209,21 +2348,21 @@ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) * This function will remove the ptype from the specific PTG, and move it to * the default PTG (ICE_DEFAULT_PTG). */ -static enum ice_status +static int ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { struct ice_ptg_ptype **ch; struct ice_ptg_ptype *p; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Should not happen if .in_use is set, bad config */ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) - return ICE_ERR_CFG; + return -EIO; /* find the ptype within this PTG, and bypass the link over it */ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; @@ -2256,17 +2395,17 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the * default PTG. 
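 *
 * The flow is the usual find-then-move shape (sketch; the early return
 * when the ptype already sits in @ptg is assumed, not shown in this
 * excerpt):
 *
 *	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
 *	if (status)
 *		return status;
 *	if (original_ptg == ptg)
 *		return 0;	/* already in the requested group */
 *	/* else: unlink from original_ptg, then link into ptg */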
*/ -static enum ice_status +static int ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { - enum ice_status status; u8 original_ptg; + int status; if (ptype > ICE_XLT1_CNT - 1) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); if (status) @@ -2401,11 +2540,11 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2) * This function will look up the VSI entry in the XLT2 list and return * the VSI group it's associated with. */ -static enum ice_status +static int ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) { if (!vsig || vsi >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; /* As long as there's a default or valid VSIG associated with the input * VSI, the function returns success. Any handling of VSIG will be * done by the following add, update or remove functions. @@ -2470,7 +2609,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) * for, the list must match exactly, including the order in which the * characteristics are listed. */ -static enum ice_status +static int ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, struct list_head *chs, u16 *vsig) { @@ -2484,7 +2623,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -2496,8 +2635,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, * The function will remove all VSIs associated with the input VSIG and move * them to the DEFAULT_VSIG and mark the VSIG available. */ -static enum ice_status -ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) +static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) { struct ice_vsig_prof *dtmp, *del; struct ice_vsig_vsi *vsi_cur; @@ -2505,10 +2643,10 @@ idx = vsig & ICE_VSIG_IDX_M; if (idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; @@ -2557,7 +2695,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) * The function will remove the input VSI from its VSI group and move it * to the DEFAULT_VSIG.
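 *
 * Membership is a singly linked list headed in the VSIG table, so the
 * removal is a plain walk-and-unlink; a generic sketch (not verbatim
 * driver code):
 *
 *	prev = NULL;
 *	for (cur = *vsi_head; cur; prev = cur, cur = cur->next_vsi)
 *		if (cur == vsi_tgt)
 *			break;
 *	if (!cur)
 *		return -ENOENT;		/* VSI not in this VSIG */
 *	if (prev)
 *		prev->next_vsi = cur->next_vsi;
 *	else
 *		*vsi_head = cur->next_vsi;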
*/ -static enum ice_status +static int ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; @@ -2566,10 +2704,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* entry already in default VSIG, don't have to remove */ if (idx == ICE_DEFAULT_VSIG) @@ -2577,7 +2715,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; if (!(*vsi_head)) - return ICE_ERR_CFG; + return -EIO; vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; vsi_cur = (*vsi_head); @@ -2594,7 +2732,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) /* verify if VSI was removed from group list */ if (!vsi_cur) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; @@ -2615,24 +2753,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) * move the entry to the DEFAULT_VSIG, update the original VSIG and * then move entry to the new VSIG. */ -static enum ice_status +static int ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi *tmp; - enum ice_status status; u16 orig_vsig, idx; + int status; idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) - return ICE_ERR_PARAM; + return -EINVAL; /* if VSIG not in use and VSIG is not default type this VSIG * doesn't exist. */ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && vsig != ICE_DEFAULT_VSIG) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (status) @@ -2738,7 +2876,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) * @masks: masks for FV * @prof_id: receives the profile ID */ -static enum ice_status +static int ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, struct ice_fv_word *fv, u16 *masks, u8 *prof_id) { @@ -2749,7 +2887,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, * field vector and mask. This will cause rule interference. */ if (blk == ICE_BLK_FD) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; for (i = 0; i < (u8)es->count; i++) { u16 off = i * es->fvw; @@ -2765,7 +2903,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -2818,14 +2956,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) * This function allocates a new entry in a Profile ID TCAM for a specific * block. */ -static enum ice_status +static int ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } @@ -2838,13 +2976,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, * * This function frees an entry in a Profile ID TCAM for a specific block. 
*/ -static enum ice_status +static int ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tcam_idx); } @@ -2858,15 +2996,14 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) * This function allocates a new profile ID, which also corresponds to a Field * Vector (Extraction Sequence) entry. */ -static enum ice_status -ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) +static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) { - enum ice_status status; u16 res_type; u16 get_prof; + int status; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); if (!status) @@ -2883,14 +3020,13 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) * * This function frees a profile ID, which also corresponds to a Field Vector. */ -static enum ice_status -ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { u16 tmp_prof_id = (u16)prof_id; u16 res_type; if (!ice_prof_id_rsrc_type(blk, &res_type)) - return ICE_ERR_PARAM; + return -EINVAL; return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); } @@ -2901,11 +3037,10 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to increment the reference count */ -static enum ice_status -ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) +static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; hw->blk[blk].es.ref_count[prof_id]++; @@ -3022,17 +3157,17 @@ static void ice_init_all_prof_masks(struct ice_hw *hw) * @mask: the 16-bit mask * @mask_idx: variable to receive the mask index */ -static enum ice_status +static int ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, u16 *mask_idx) { bool found_unused = false, found_copy = false; - enum ice_status status = ICE_ERR_MAX_LIMIT; u16 unused_idx = 0, copy_idx = 0; + int status = -ENOSPC; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->blk[blk].masks.lock); @@ -3090,15 +3225,15 @@ err_ice_alloc_prof_mask: * @blk: hardware block * @mask_idx: index of mask */ -static enum ice_status +static int ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) { if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; if (!(mask_idx >= hw->blk[blk].masks.first && mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; mutex_lock(&hw->blk[blk].masks.lock); @@ -3132,14 +3267,14 @@ exit_ice_free_prof_mask: * @blk: hardware block * @prof_id: profile ID */ -static enum ice_status +static int ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) { u32 mask_bm; u16 i; if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) - return ICE_ERR_PARAM; + return -EINVAL; mask_bm = hw->blk[blk].es.mask_ena[prof_id]; for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) @@ -3194,7 +3329,7 @@ static void ice_shutdown_all_prof_masks(struct ice_hw *hw) * @prof_id: profile ID * @masks: masks */ -static 
enum ice_status +static int ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, u16 *masks) { @@ -3224,7 +3359,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, if (ena_mask & BIT(i)) ice_free_prof_mask(hw, blk, i); - return ICE_ERR_OUT_OF_RANGE; + return -EIO; } /* enable the masks for this profile */ @@ -3266,11 +3401,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to decrement the reference count */ -static enum ice_status +static int ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) - return ICE_ERR_PARAM; + return -EINVAL; if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { @@ -3708,7 +3843,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) * ice_init_hw_tbls - init hardware table memory * @hw: pointer to the hardware structure */ -enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +int ice_init_hw_tbls(struct ice_hw *hw) { u8 i; @@ -3827,7 +3962,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) err: ice_free_hw_tbls(hw); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -3843,7 +3978,7 @@ err: * @nm_msk: never match mask * @key: output of profile ID key */ -static enum ice_status +static int ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], @@ -3899,7 +4034,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, * @dc_msk: don't care mask * @nm_msk: never match mask */ -static enum ice_status +static int ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], @@ -3907,7 +4042,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) { struct ice_prof_tcam_entry; - enum ice_status status; + int status; status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); @@ -3926,7 +4061,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, * @vsig: VSIG to query * @refs: pointer to variable to receive the reference count */ -static enum ice_status +static int ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) { u16 idx = vsig & ICE_VSIG_IDX_M; @@ -3935,7 +4070,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) *refs = 0; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; while (ptr) { @@ -3976,7 +4111,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -3996,7 +4131,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, sizeof(p->es[0])); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->prof_id); @@ -4014,7 +4149,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware 
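 *
 * Like the other ice_prof_bld_*() helpers it emits one package section
 * per matching entry in @chgs; the shared shape, with the section ID and
 * size arguments elided as placeholders (illustrative sketch):
 *
 *	list_for_each_entry(tmp, chgs, list_entry) {
 *		p = ice_pkg_buf_alloc_section(bld, sid, size);
 *		if (!p)
 *			return -ENOSPC;
 *		p->count = cpu_to_le16(1);
 *		p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
 *		/* remaining entry fields copied from *tmp */
 *	}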
*/ -static enum ice_status +static int ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4030,7 +4165,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct_size(p, entry, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); @@ -4050,7 +4185,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4066,7 +4201,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->ptype); @@ -4082,7 +4217,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct list_head *chgs) { @@ -4101,7 +4236,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct_size(p, value, 1)); if (!p) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; p->count = cpu_to_le16(1); p->offset = cpu_to_le16(tmp->vsi); @@ -4121,18 +4256,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, * @blk: hardware block * @chgs: the list of changes to make in hardware */ -static enum ice_status +static int ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, struct list_head *chgs) { struct ice_buf_build *b; struct ice_chs_chg *tmp; - enum ice_status status; u16 pkg_sects; u16 xlt1 = 0; u16 xlt2 = 0; u16 tcam = 0; u16 es = 0; + int status; u16 sects; /* count number of sections we need */ @@ -4164,7 +4299,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, /* Build update package buffer */ b = ice_pkg_buf_alloc(hw); if (!b) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_pkg_buf_reserve_section(b, sects); if (status) @@ -4201,13 +4336,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, */ pkg_sects = ice_pkg_buf_get_active_sections(b); if (!pkg_sects || pkg_sects != sects) { - status = ICE_ERR_INVAL_SIZE; + status = -EINVAL; goto error_tmp; } /* update package */ status = ice_update_pkg(hw, ice_pkg_buf(b), 1); - if (status == ICE_ERR_AQ_ERROR) + if (status == -EIO) ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); error_tmp: @@ -4273,7 +4408,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { * @prof_id: profile ID * @es: extraction sequence (length of array is determined by the block) */ -static enum ice_status +static int ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) { DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); @@ -4305,7 +4440,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) if (es[i].prot_id == ice_fd_pairs[j].prot_id && es[i].off == ice_fd_pairs[j].off) { - set_bit(j, pair_list); + __set_bit(j, pair_list); pair_start[j] = i; } } @@ -4328,7 +4463,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) /* check for room */ if (first_free + 1 < (s8)ice_fd_pairs[index].count) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* place in extraction sequence */ for 
(k = 0; k < ice_fd_pairs[index].count; k++) { @@ -4338,7 +4473,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) ice_fd_pairs[index].off + (k * 2); if (k > first_free) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* keep track of non-relevant fields */ mask_sel |= BIT(first_free - k); @@ -4449,7 +4584,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, * @attr: array of attributes that will be considered * @attr_cnt: number of elements in the attribute array */ -static enum ice_status +static int ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, const struct ice_ptype_attributes *attr, u16 attr_cnt) { @@ -4465,11 +4600,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, &prof->attr[prof->ptg_cnt]); if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } if (!found) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; return 0; } @@ -4490,7 +4625,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * it will not be written until the first call to ice_add_flow that specifies * the ID value used here. */ -enum ice_status +int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, struct ice_fv_word *es, u16 *masks) @@ -4498,9 +4633,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; - enum ice_status status; u8 byte = 0; u8 prof_id; + int status; bitmap_zero(ptgs_used, ICE_XLT1_CNT); @@ -4538,7 +4673,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], /* add profile info */ prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL); if (!prof) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof; } @@ -4575,13 +4710,13 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], if (test_bit(ptg, ptgs_used)) continue; - set_bit(ptg, ptgs_used); + __set_bit(ptg, ptgs_used); /* Check to see there are any attributes for * this PTYPE, and add them if found. 
*/ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt); - if (status == ICE_ERR_MAX_LIMIT) + if (status == -ENOSPC) break; if (status) { /* This is simply a PTYPE/PTG with no @@ -4658,14 +4793,13 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) * @blk: hardware block * @idx: the index to release */ -static enum ice_status -ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) +static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) { /* Masks to invoke a never match entry */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status; + int status; /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, @@ -4685,11 +4819,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) * @blk: hardware block * @prof: pointer to profile structure to remove */ -static enum ice_status +static int ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_vsig_prof *prof) { - enum ice_status status; + int status; u16 i; for (i = 0; i < prof->tcam_count; i++) @@ -4698,7 +4832,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx); if (status) - return ICE_ERR_HW_TABLE; + return -EIO; } return 0; @@ -4711,14 +4845,14 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, * @vsig: the VSIG to remove * @chg: the change list */ -static enum ice_status +static int ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - enum ice_status status; + int status; /* remove TCAM entries */ list_for_each_entry_safe(d, t, @@ -4745,7 +4879,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; p->type = ICE_VSIG_REM; p->orig_vsig = vsig; @@ -4768,13 +4902,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @hdl: profile handle indicating which profile to remove * @chg: list to receive a record of changes */ -static enum ice_status +static int ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, struct list_head *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - enum ice_status status; + int status; list_for_each_entry_safe(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, @@ -4792,7 +4926,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, return status; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -4801,12 +4935,11 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, * @blk: hardware block * @id: profile tracking ID */ -static enum ice_status -ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) +static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_chs_chg *del, *tmp; - enum ice_status status; struct list_head chg; + int status; u16 i; INIT_LIST_HEAD(&chg); @@ -4842,16 +4975,16 @@ err_ice_rem_flow_all: * previously created through ice_add_prof. If any existing entries * are associated with this profile, they will be removed as well.
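 *
 * Illustrative caller-side use (the error-handling shape is a sketch,
 * not lifted from this patch): tear down a profile created earlier under
 * the same tracking ID, tolerating one that is already gone:
 *
 *	err = ice_rem_prof(hw, ICE_BLK_FD, prof_id);
 *	if (err && err != -ENOENT)
 *		ice_debug(hw, ICE_DBG_PKG, "profile removal failed: %d\n", err);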
*/ -enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *pmap; - enum ice_status status; + int status; mutex_lock(&hw->blk[blk].es.prof_map_lock); pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_rem_prof; } @@ -4878,20 +5011,20 @@ err_ice_rem_prof: * @hdl: profile handle * @chg: change list */ -static enum ice_status +static int ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct list_head *chg) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_chs_chg *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_get_prof; } @@ -4901,7 +5034,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_get_prof; } @@ -4933,7 +5066,7 @@ err_ice_get_prof: * * This routine makes a copy of the list of profiles in the specified VSIG. */ -static enum ice_status +static int ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *lst) { @@ -4961,7 +5094,7 @@ err_ice_get_profs_vsig: devm_kfree(ice_hw_to_dev(hw), ent1); } - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -4971,25 +5104,25 @@ err_ice_get_profs_vsig: * @lst: the list to be added to * @hdl: profile handle of entry to add */ -static enum ice_status +static int ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct list_head *lst, u64 hdl) { - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *p; + int status = 0; u16 i; mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_to_lst; } p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_to_lst; } @@ -5018,17 +5151,17 @@ err_ice_add_prof_to_lst: * @vsig: the VSIG to move the VSI to * @chg: the change list */ -static enum ice_status +static int ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 orig_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (!status) @@ -5078,13 +5211,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg) * * This function appends an enable or disable TCAM entry in the change log */ -static enum ice_status +static int ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, u16 vsig, struct ice_tcam_inf *tcam, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; + int status; u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; @@ -5117,7 +5250,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = 
ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, tcam->ptg, vsig, 0, tcam->attr.flags, @@ -5151,13 +5284,13 @@ err_ice_prof_tcam_ena_dis: * @vsig: the VSIG for which to adjust profile priorities * @chg: the change list */ -static enum ice_status +static int ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); struct ice_vsig_prof *t; - enum ice_status status; + int status; u16 idx; bitmap_zero(ptgs_used, ICE_XLT1_CNT); @@ -5206,7 +5339,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, } /* keep track of used ptgs */ - set_bit(t->tcam[i].ptg, ptgs_used); + __set_bit(t->tcam[i].ptg, ptgs_used); } } @@ -5222,7 +5355,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, * @rev: true to add entries to the end of the list * @chg: the change list */ -static enum ice_status +static int ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, bool rev, struct list_head *chg) { @@ -5230,26 +5363,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; - enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; + int status = 0; /* Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; /* new VSIG profile structure */ t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL); if (!t) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto err_ice_add_prof_id_vsig; } @@ -5264,7 +5397,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_prof_id_vsig; } @@ -5334,21 +5467,21 @@ err_ice_add_prof_id_vsig: * @hdl: the profile handle of the profile that will be added to the VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, struct list_head *chg) { - enum ice_status status; struct ice_chs_chg *p; u16 new_vsig; + int status; p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); if (!p) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; new_vsig = ice_vsig_alloc(hw, blk); if (!new_vsig) { - status = ICE_ERR_HW_TABLE; + status = -EIO; goto err_ice_create_prof_id_vsig; } @@ -5384,18 +5517,18 @@ err_ice_create_prof_id_vsig: * @new_vsig: return of new VSIG * @chg: the change list */ -static enum ice_status +static int ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, struct list_head *lst, u16 *new_vsig, struct list_head *chg) { struct ice_vsig_prof *t; - enum ice_status status; + int status; u16 vsig; vsig = ice_vsig_alloc(hw, blk); if (!vsig) - return ICE_ERR_HW_TABLE; + return -EIO; status = ice_move_vsi(hw, blk, vsi, vsig, chg); if (status) @@ -5425,8 +5558,8 @@ static bool ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 
*vsig) { struct ice_vsig_prof *t; - enum ice_status status; struct list_head lst; + int status; INIT_LIST_HEAD(&lst); @@ -5456,14 +5589,14 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be enabled. */ -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head union_lst; - enum ice_status status; struct list_head chg; + int status; u16 vsig; INIT_LIST_HEAD(&union_lst); @@ -5489,7 +5622,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) * scenario */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto err_ice_add_prof_id_flow; } @@ -5597,7 +5730,7 @@ err_ice_add_prof_id_flow: * @lst: list to remove the profile from * @hdl: the profile handle indicating the profile to remove */ -static enum ice_status +static int ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) { struct ice_vsig_prof *ent, *tmp; @@ -5609,7 +5742,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) return 0; } - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -5623,13 +5756,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl) * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be disabled. */ -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct list_head chg, copy; - enum ice_status status; + int status; u16 vsig; INIT_LIST_HEAD(©); @@ -5724,7 +5857,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) } } } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } /* update hardware tables */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index 344c2637facd..6cbc29bcb02f 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -18,10 +18,67 @@ #define ICE_PKG_CNT 4 -enum ice_status +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver 
supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* The signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10, + + /* An error occurred in firmware while loading the DDP package */ + ICE_DDP_PKG_LOAD_ERROR = -11, + + /* Other errors */ + ICE_DDP_PKG_ERR = -12 +}; + +int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); -enum ice_status +int ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off); void @@ -29,32 +86,37 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, unsigned long *bm); void ice_init_prof_result_bm(struct ice_hw *hw); -enum ice_status +int ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, unsigned long *bm, struct list_head *fv_list); bool -ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port); +ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port, + enum ice_tunnel_type type); int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); -enum ice_status +/* Rx parser PTYPE functions */ +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); + +/* XLT2/VSI group functions */ +int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, struct ice_fv_word *es, u16 *masks); -enum ice_status +int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status +int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); -enum ice_status +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); -enum ice_status ice_init_hw_tbls(struct ice_hw *hw); +bool ice_is_init_pkg_successful(enum ice_ddp_state state); +int ice_init_hw_tbls(struct ice_hw *hw); void ice_free_seg(struct ice_hw *hw); void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); -enum ice_status -ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); +int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 0f572a36d021..fc087e0b5292 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -160,6 +160,7 @@ struct ice_meta_sect { #define ICE_SID_CDID_KEY_BUILDER_RSS 47 #define ICE_SID_CDID_REDIR_RSS 48 +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 #define ICE_SID_RXPARSER_BOOST_TCAM 56 #define ICE_SID_TXPARSER_BOOST_TCAM 66 @@ -201,6 +202,24 @@ enum ice_sect { ICE_SECT_COUNT }; +/* Packet Type (PTYPE) values */ +#define ICE_PTYPE_MAC_PAY 1 +#define ICE_PTYPE_IPV4_PAY 23 +#define ICE_PTYPE_IPV4_UDP_PAY 24 +#define ICE_PTYPE_IPV4_TCP_PAY 26 +#define ICE_PTYPE_IPV4_SCTP_PAY 27 +#define ICE_PTYPE_IPV6_PAY 89 +#define ICE_PTYPE_IPV6_UDP_PAY 90 +#define ICE_PTYPE_IPV6_TCP_PAY 92 +#define ICE_PTYPE_IPV6_SCTP_PAY 93 +#define 
ICE_MAC_IPV4_ESP 160 +#define ICE_MAC_IPV6_ESP 161 +#define ICE_MAC_IPV4_AH 162 +#define ICE_MAC_IPV6_AH 163 +#define ICE_MAC_IPV4_NAT_T_ESP 164 +#define ICE_MAC_IPV6_NAT_T_ESP 165 +#define ICE_MAC_IPV4_GTPU 329 +#define ICE_MAC_IPV6_GTPU 330 #define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331 #define ICE_MAC_IPV4_GTPU_IPV4_PAY 332 #define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333 @@ -221,6 +240,10 @@ enum ice_sect { #define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348 #define ICE_MAC_IPV6_GTPU_IPV6_TCP 349 #define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350 +#define ICE_MAC_IPV4_PFCP_SESSION 352 +#define ICE_MAC_IPV6_PFCP_SESSION 354 +#define ICE_MAC_IPV4_L2TPV3 360 +#define ICE_MAC_IPV6_L2TPV3 361 /* Attributes that can modify PTYPE definitions. * @@ -329,6 +352,25 @@ struct ice_boost_tcam_section { sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) +/* package Marker Ptype TCAM entry */ +struct ice_marker_ptype_tcam_entry { +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024 + __le16 addr; + __le16 ptype; + u8 keys[20]; +}; + +struct ice_marker_ptype_tcam_section { + __le16 count; + __le16 reserved; + struct ice_marker_ptype_tcam_entry tcam[]; +}; + +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \ + ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \ + sizeof(struct ice_marker_ptype_tcam_entry), \ + sizeof(struct ice_marker_ptype_tcam_entry)) + struct ice_xlt1_section { __le16 count; __le16 offset; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index f160672448a0..beed4838dcbe 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -609,8 +609,6 @@ struct ice_flow_prof_params { ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \ ICE_FLOW_SEG_HDR_NAT_T_ESP) -#define ICE_FLOW_SEG_HDRS_L2_MASK \ - (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) #define ICE_FLOW_SEG_HDRS_L3_MASK \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP) #define ICE_FLOW_SEG_HDRS_L4_MASK \ @@ -625,8 +623,7 @@ struct ice_flow_prof_params { * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ -static enum ice_status -ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) +static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) { u8 i; @@ -634,12 +631,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) /* Multiple L3 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; /* Multiple L4 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) - return ICE_ERR_PARAM; + return -EINVAL; } return 0; @@ -700,8 +697,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) * This function identifies the packet types associated with the protocol * headers being present in packet segments of the specified flow profile. */ -static enum ice_status -ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) +static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) { struct ice_flow_prof *prof; u8 i; @@ -898,7 +894,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) * field. It then allocates one or more extraction sequence entries for the * given field, and fill the entries with protocol ID and offset information. 
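 *
 * Writing one entry reduces to the following (sketch mirroring the
 * bounds check and reversed-layout handling in the body; the assignment
 * lines are assumed from the surrounding sources):
 *
 *	if (params->es_cnt >= fv_words)
 *		return -ENOSPC;
 *	idx = hw->blk[params->blk].es.reverse ?
 *	      fv_words - params->es_cnt - 1 : params->es_cnt;
 *	params->es[idx].prot_id = prot_id;
 *	params->es[idx].off = off;
 *	params->es_cnt++;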
*/ -static enum ice_status +static int ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg, enum ice_flow_field fld, u64 match) { @@ -1035,7 +1031,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, prot_id = ICE_PROT_GRE_OF; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } /* Each extraction sequence entry is a word in size, and extracts a @@ -1073,7 +1069,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * does not exceed the block's capability */ if (params->es_cnt >= fv_words) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1099,7 +1095,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, * @params: information about the flow to be processed * @seg: index of packet segment whose raw fields are to be extracted */ -static enum ice_status +static int ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg) { @@ -1112,12 +1108,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, if (params->prof->segs[seg].raws_cnt > ARRAY_SIZE(params->prof->segs[seg].raws)) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* Offsets within the segment headers are not supported */ hdrs_sz = ice_flow_calc_seg_sz(params, seg); if (!hdrs_sz) - return ICE_ERR_PARAM; + return -EINVAL; fv_words = hw->blk[params->blk].es.fvw; @@ -1150,7 +1146,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, */ if (params->es_cnt >= hw->blk[params->blk].es.count || params->es_cnt >= ICE_MAX_FV_WORDS) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) @@ -1176,12 +1172,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, * This function iterates through all matched fields in the given segments, and * creates an extraction sequence for the fields. 
*/ -static enum ice_status +static int ice_flow_create_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof_params *params) { struct ice_flow_prof *prof = params->prof; - enum ice_status status = 0; + int status = 0; u8 i; for (i = 0; i < prof->segs_cnt; i++) { @@ -1210,10 +1206,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw, * @hw: pointer to the HW struct * @params: information about the flow to be processed */ -static enum ice_status +static int ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) { - enum ice_status status; + int status; status = ice_flow_proc_seg_hdrs(params); if (status) @@ -1229,7 +1225,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) status = 0; break; default: - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; } return status; @@ -1329,12 +1325,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) * @blk: classification stage * @entry: flow entry to be removed */ -static enum ice_status +static int ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, struct ice_flow_entry *entry) { if (!entry) - return ICE_ERR_BAD_PTR; + return -EINVAL; list_del(&entry->l_entry); @@ -1355,27 +1351,27 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk, * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; - enum ice_status status; + int status; u8 i; if (!prof) - return ICE_ERR_BAD_PTR; + return -EINVAL; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof), GFP_KERNEL); if (!params->prof) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto free_params; } @@ -1432,11 +1428,11 @@ free_params: * * Assumption: the caller has acquired the lock to the profile list */ -static enum ice_status +static int ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof) { - enum ice_status status; + int status; /* Remove all remaining flow entries before removing the flow profile */ if (!list_empty(&prof->entries)) { @@ -1474,11 +1470,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (!test_bit(vsi_handle, prof->vsis)) { status = ice_add_prof_id_flow(hw, blk, @@ -1505,11 +1501,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ -static enum ice_status +static int ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { - enum ice_status status = 0; + int status = 0; if (test_bit(vsi_handle, prof->vsis)) { status = ice_rem_prof_id_flow(hw, blk, @@ -1536,21 +1532,21 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, * @segs_cnt: number of packet segments provided * @prof: stores the returned flow profile added */ -enum ice_status +int 
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { - enum ice_status status; + int status; if (segs_cnt > ICE_FLOW_SEG_MAX) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; if (!segs_cnt) - return ICE_ERR_PARAM; + return -EINVAL; if (!segs) - return ICE_ERR_BAD_PTR; + return -EINVAL; status = ice_flow_val_hdrs(segs, segs_cnt); if (status) @@ -1574,17 +1570,16 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, * @blk: the block for which the flow profile is to be removed * @prof_id: unique ID of the flow profile to be removed */ -enum ice_status -ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *prof; - enum ice_status status; + int status; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -1608,34 +1603,34 @@ out: * @data: pointer to a data buffer containing flow entry's match values/masks * @entry_h: pointer to buffer that receives the new flow entry's handle */ -enum ice_status +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, void *data, u64 *entry_h) { struct ice_flow_entry *e = NULL; struct ice_flow_prof *prof; - enum ice_status status; + int status; /* No flow entry data is expected for RSS */ if (!entry_h || (!data && blk != ICE_BLK_RSS)) - return ICE_ERR_BAD_PTR; + return -EINVAL; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } else { /* Allocate memory for the entry being added and associate * the VSI to the found flow profile */ e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL); if (!e) - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; else status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); } @@ -1654,7 +1649,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, case ICE_BLK_RSS: break; default: - status = ICE_ERR_NOT_IMPL; + status = -EOPNOTSUPP; goto out; } @@ -1680,15 +1675,14 @@ out: * @blk: classification stage * @entry_h: handle to the flow entry to be removed */ -enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, - u64 entry_h) +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h) { struct ice_flow_entry *entry; struct ice_flow_prof *prof; - enum ice_status status = 0; + int status = 0; if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL) - return ICE_ERR_PARAM; + return -EINVAL; entry = ICE_FLOW_ENTRY_PTR(entry_h); @@ -1812,6 +1806,57 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, seg->raws_cnt++; } +/** + * ice_flow_rem_vsi_prof - remove VSI from flow profile + * @hw: pointer to the hardware structure + * @vsi_handle: software VSI handle + * @prof_id: unique ID to identify this flow profile + * + * This function removes the flow entries associated with the input + * VSI handle and disassociates the VSI from the flow profile.
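 *
 * A minimal caller sketch, assuming a flow director profile with ID
 * prof_id was previously added for this VSI:
 *
 *	err = ice_flow_rem_vsi_prof(&pf->hw, vsi->idx, prof_id);
 *	if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "failed to remove FD profile %llu, err %d\n",
 *			prof_id, err);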
+ */ +int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id) +{ + struct ice_flow_prof *prof; + int status = 0; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return -EINVAL; + + /* find flow profile pointer with input package block and profile ID */ + prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id); + if (!prof) { + ice_debug(hw, ICE_DBG_PKG, "Cannot find flow profile id=%llu\n", + prof_id); + return -ENOENT; + } + + /* Remove all remaining flow entries before removing the flow profile */ + if (!list_empty(&prof->entries)) { + struct ice_flow_entry *e, *t; + + mutex_lock(&prof->entries_lock); + list_for_each_entry_safe(e, t, &prof->entries, l_entry) { + if (e->vsi_handle != vsi_handle) + continue; + + status = ice_flow_rem_entry_sync(hw, ICE_BLK_FD, e); + if (status) + break; + } + mutex_unlock(&prof->entries_lock); + } + if (status) + return status; + + /* disassociate the flow profile from sw VSI handle */ + status = ice_flow_disassoc_prof(hw, ICE_BLK_FD, prof, vsi_handle); + if (status) + ice_debug(hw, ICE_DBG_PKG, "ice_flow_disassoc_prof() failed with status=%d\n", + status); + return status; +} + #define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \ (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN) @@ -1836,7 +1881,7 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, * header value to set flow field segment for further use in flow * profile entry or removal. */ -static enum ice_status +static int ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, u32 flow_hdr) { @@ -1853,15 +1898,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS & ~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER) - return ICE_ERR_PARAM; + return -EINVAL; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !is_power_of_2(val)) - return ICE_ERR_CFG; + return -EIO; return 0; } @@ -1899,14 +1944,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) * the VSI from that profile. If the flow profile has no VSIs it will * be removed. 
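 *
 * Usage mirrors the cleanup path in ice_lib.c later in this patch
 * (a sketch; assumes a valid PF and software VSI index):
 *
 *	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
 *	if (status)
 *		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
 *			vsi->vsi_num, status);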
*/ -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *p, *t; - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (list_empty(&hw->fl_profs[blk])) return 0; @@ -1966,7 +2011,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { struct ice_rss_cfg *r, *rss_cfg; @@ -1981,7 +2026,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg), GFP_KERNEL); if (!rss_cfg) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match; rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs; @@ -2022,21 +2067,21 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs, u8 segs_cnt) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; - enum ice_status status; + int status; if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX) - return ICE_ERR_PARAM; + return -EINVAL; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, @@ -2128,15 +2173,15 @@ exit: * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. */ -enum ice_status +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs) { - enum ice_status status; + int status; if (hashed_flds == ICE_HASH_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, @@ -2159,18 +2204,18 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * * Assumption: lock has already been acquired for RSS list */ -static enum ice_status +static int ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs, u8 segs_cnt) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; - enum ice_status status; + int status; segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL); if (!segs) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds, @@ -2182,7 +2227,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto out; } @@ -2216,15 +2261,15 @@ out: * removed. Calls are made to underlying flow APIs which will in turn build or update buffers for RSS XLT1 section.
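 *
 * Illustrative call (a sketch; the hash and header flags follow the same
 * pattern as the ice_add_rss_cfg() calls in ice_lib.c):
 *
 *	err = ice_rem_rss_cfg(&pf->hw, vsi->idx, ICE_FLOW_HASH_IPV4,
 *			      ICE_FLOW_SEG_HDR_IPV4);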
*/ -enum ice_status __maybe_unused +int __maybe_unused ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs) { - enum ice_status status; + int status; if (hashed_flds == ICE_HASH_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs, @@ -2279,20 +2324,19 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, * message, convert it to ICE-compatible values, and configure RSS flow * profiles. */ -enum ice_status -ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) { - enum ice_status status = 0; + int status = 0; u64 hash_flds; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; /* Make sure no unsupported bits are specified */ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS | ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)) - return ICE_ERR_CFG; + return -EIO; hash_flds = avf_hash; @@ -2352,7 +2396,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) } if (rss_hash == ICE_HASH_INVALID) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; status = ice_add_rss_cfg(hw, vsi_handle, rss_hash, ICE_FLOW_SEG_HDR_NONE); @@ -2368,13 +2412,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle */ -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { - enum ice_status status = 0; struct ice_rss_cfg *r; + int status = 0; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&hw->rss_locks); list_for_each_entry(r, &hw->rss_list_head, l_entry) { diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 2a2d8c1536cb..84b6e4464a21 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -383,33 +383,31 @@ struct ice_rss_cfg { u32 packet_hdr; }; -enum ice_status +int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof); -enum ice_status -ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); -enum ice_status +int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi, enum ice_flow_priority prio, void *data, u64 *entry_h); -enum ice_status -ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); +int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); void ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 mask_loc, u16 last_loc, bool range); void ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, u16 val_loc, u16 mask_loc); +int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status -ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); -enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_replay_rss_cfg(struct ice_hw 
*hw, u16 vsi_handle); +int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); +int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); +int ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs); -enum ice_status +int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds, u32 addl_hdrs); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c index c2e78eaf4ccb..c29177c6bb9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.c +++ b/drivers/net/ethernet/intel/ice/ice_fltr.c @@ -47,12 +47,69 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info, } /** + * ice_fltr_set_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi: the VSI being configured + * @promisc_mask: mask of promiscuous config bits + * + * Set VSI with all associated VLANs to given promiscuous mode(s) + */ +int +ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask) +{ + return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false); +} + +/** + * ice_fltr_clear_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi: the VSI being configured + * @promisc_mask: mask of promiscuous config bits + * + * Clear VSI with all associated VLANs to given promiscuous mode(s) + */ +int +ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask) +{ + return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true); +} + +/** + * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to clear mode + * @promisc_mask: mask of promiscuous config bits to clear + * @vid: VLAN ID to clear VLAN promiscuous + */ +int +ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid); +} + +/** + * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @vid: VLAN ID to set VLAN promiscuous + */ +int +ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid); +} + +/** * ice_fltr_add_mac_list - add list of MAC filters * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_mac(&vsi->back->hw, list); } @@ -62,8 +119,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) +int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_mac(&vsi->back->hw, list); } @@ -73,8 +129,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_vlan(&vsi->back->hw, list); } @@ -84,7 +139,7 @@ ice_fltr_add_vlan_list(struct ice_vsi 
*vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status +static int ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_vlan(&vsi->back->hw, list); @@ -95,8 +150,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_add_eth_mac(&vsi->back->hw, list); } @@ -106,8 +160,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list) * @vsi: pointer to VSI struct * @list: list of filters */ -static enum ice_status -ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) +static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list) { return ice_remove_eth_mac(&vsi->back->hw, list); } @@ -207,18 +260,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status (*mac_action)(struct ice_vsi *, - struct list_head *)) + int (*mac_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) { ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -233,21 +285,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac, * @action: action to be performed on filter match * @mac_action: pointer to add or remove MAC function */ -static enum ice_status +static int ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action, - enum ice_status(*mac_action) + int(*mac_action) (struct ice_vsi *, struct list_head *)) { u8 broadcast[ETH_ALEN]; - enum ice_status result; LIST_HEAD(tmp_list); + int result; eth_broadcast_addr(broadcast); if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) || ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) { ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } result = mac_action(vsi, &tmp_list); @@ -262,17 +314,16 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @action: action to be performed on filter match * @vlan_action: pointer to add or remove VLAN function */ -static enum ice_status +static int ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id, enum ice_sw_fwd_act_type action, - enum ice_status (*vlan_action)(struct ice_vsi *, - struct list_head *)) + int (*vlan_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = vlan_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -287,17 +338,16 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id, * @action: action to be performed on filter match * @eth_action: pointer to add or remove ethertype function */ -static enum ice_status +static int ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type 
action, - enum ice_status (*eth_action)(struct ice_vsi *, - struct list_head *)) + int (*eth_action)(struct ice_vsi *, struct list_head *)) { - enum ice_status result; LIST_HEAD(tmp_list); + int result; if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action)) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; result = eth_action(vsi, &tmp_list); ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list); @@ -310,8 +360,8 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list); } @@ -322,7 +372,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, * @mac: MAC to add * @action: action to be performed on filter match */ -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action) { @@ -336,8 +386,8 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, * @mac: filter MAC to remove * @action: action to remove */ -enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, - enum ice_sw_fwd_act_type action) +int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list); } @@ -348,8 +398,8 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, * @vlan_id: VLAN ID to add * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_vlan(vsi, vlan_id, action, ice_fltr_add_vlan_list); @@ -361,8 +411,8 @@ enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id, * @vlan_id: filter VLAN to remove * @action: action to remove */ -enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, - enum ice_sw_fwd_act_type action) +int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_vlan(vsi, vlan_id, action, ice_fltr_remove_vlan_list); @@ -375,8 +425,8 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id, * @flag: direction of packet to be filtered, Tx or Rx * @action: action to be performed on filter match */ -enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, - enum ice_sw_fwd_act_type action) +int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_add_eth_list); @@ -389,89 +439,9 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, * @flag: direction of filter * @action: action to remove */ -enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, - u16 flag, enum ice_sw_fwd_act_type action) +int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, + enum ice_sw_fwd_act_type action) { return ice_fltr_prepare_eth(vsi, ethertype, flag, action, ice_fltr_remove_eth_list); } - -/** - * ice_fltr_update_rule_flags - update lan_en/lb_en flags - * @hw: pointer to hw - * @rule_id: id of rule being updated - * 
@recipe_id: recipe id of rule - * @act: current action field - * @type: Rx or Tx - * @src: source VSI - * @new_flags: combinations of lb_en and lan_en - */ -static enum ice_status -ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id, - u32 act, u16 type, u16 src, u32 new_flags) -{ - struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status err; - u32 flags_mask; - - s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE; - act &= ~flags_mask; - act |= (flags_mask & new_flags); - - s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id); - s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); - s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); - - if (type & ICE_FLTR_RX) { - s_rule->pdata.lkup_tx_rx.src = - cpu_to_le16(hw->port_info->lport); - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); - - } else { - s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src); - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); - } - - err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, - ice_aqc_opc_update_sw_rules, NULL); - - kfree(s_rule); - return err; -} - -/** - * ice_fltr_build_action - build action for rule - * @vsi_id: id of VSI which is use to build action - */ -static u32 ice_fltr_build_action(u16 vsi_id) -{ - return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) | - ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; -} - -/** - * ice_fltr_update_flags_dflt_rule - update flags on default rule - * @vsi: pointer to VSI - * @rule_id: id of rule - * @direction: Tx or Rx - * @new_flags: flags to update - * - * Function updates flags on default rule with ICE_SW_LKUP_DFLT. - * - * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and - * ICE_SINGLE_ACT_LAN_ENABLE. 
- */ -enum ice_status -ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, - u32 new_flags) -{ - u32 action = ice_fltr_build_action(vsi->vsi_num); - struct ice_hw *hw = &vsi->back->hw; - - return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action, - direction, vsi->vsi_num, new_flags); -} diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h index 8eec4febead1..3eb42479175f 100644 --- a/drivers/net/ethernet/intel/ice/ice_fltr.h +++ b/drivers/net/ethernet/intel/ice/ice_fltr.h @@ -5,38 +5,49 @@ #define _ICE_FLTR_H_ void ice_fltr_free_list(struct device *dev, struct list_head *h); -enum ice_status +int +ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int +ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi, + u8 promisc_mask); +int +ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +int +ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +int ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status +int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac, enum ice_sw_fwd_act_type action); -enum ice_status -ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); +int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list); -enum ice_status +int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); -enum ice_status +int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag, enum ice_sw_fwd_act_type action); void ice_fltr_remove_all(struct ice_vsi *vsi); -enum ice_status -ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction, - u32 new_flags); + +int +ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id, + u32 new_flags); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index f8601d5b0b19..665a344fb9c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -16,6 +16,18 @@ struct ice_fwu_priv { /* Track which NVM banks to activate at the end of the update */ u8 activate_flags; + + /* Track the firmware response of the required reset to complete the + * flash update. 
+ * + * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required + * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required + * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required + */ + u8 reset_level; + + /* Track if EMP reset is available */ + u8 emp_reset_available; }; /** @@ -40,8 +52,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 *package_data; + int status; dev_dbg(dev, "Sending PLDM record package data to firmware\n"); @@ -54,9 +66,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) kfree(package_data); if (status) { - dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware"); return -EIO; } @@ -203,8 +214,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon struct device *dev = context->dev; struct ice_pf *pf = priv->pf; struct ice_hw *hw = &pf->hw; - enum ice_status status; size_t length; + int status; switch (component->identifier) { case NVM_COMP_ID_OROM: @@ -240,9 +251,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon kfree(comp_tbl); if (status) { - dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware"); return -EIO; } @@ -259,6 +269,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * @block_size: size of the block to write, up to 4k * @block: pointer to block of data to write * @last_cmd: whether this is the last command + * @reset_level: storage for reset level required * @extack: netlink extended ACK structure * * Write a block of data to a flash module, and await for the completion @@ -266,18 +277,24 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * * Note this function assumes the caller has acquired the NVM resource. * + * On successful return, reset level indicates the device reset required to + * complete the update. + * + * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required + * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required + * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required + * * Returns: zero on success, or a negative error code on failure. 
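 *
 * A caller can consume the reported level like this (a sketch;
 * reset_level may also be passed as NULL when the hint is not needed):
 *
 *	u8 reset_level;
 *
 *	err = ice_write_one_nvm_block(pf, module, offset, block_size,
 *				      block, last_cmd, &reset_level, extack);
 *	if (!err && reset_level == ICE_AQC_NVM_EMPR_FLAG)
 *		dev_dbg(dev, "an EMP reset will activate this update\n");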
*/ static int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, u16 block_size, u8 *block, bool last_cmd, - struct netlink_ext_ack *extack) + u8 *reset_level, struct netlink_ext_ack *extack) { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u32 completion_offset; int err; @@ -286,11 +303,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n", block_size, module, offset); - status = ice_aq_update_nvm(hw, module, offset, block_size, block, - last_cmd, 0, NULL); - if (status) { - dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n", - module, block_size, offset, ice_stat_str(status), + err = ice_aq_update_nvm(hw, module, offset, block_size, block, + last_cmd, 0, NULL); + if (err) { + dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n", + module, block_size, offset, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module"); return -EIO; @@ -338,6 +355,24 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, return -EIO; } + /* For the last command to write the NVM bank, newer versions of + * firmware indicate the required level of reset to complete + * activation of firmware. If the firmware supports this, cache the + * response for indicating to the user later. Otherwise, assume that + * a full power cycle is required. + */ + if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) { + if (hw->dev_caps.common_cap.pcie_reset_avoidance) { + *reset_level = (event.desc.params.nvm.cmd_flags & + ICE_AQC_NVM_RESET_LVL_M); + dev_dbg(dev, "Firmware reported required reset level as %u\n", + *reset_level); + } else { + *reset_level = ICE_AQC_NVM_POR_FLAG; + dev_dbg(dev, "Firmware doesn't support indicating required reset level. 
Assuming a power cycle is required\n"); + } + } + return 0; } @@ -348,6 +383,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, * @component: the name of the component being updated * @image: buffer of image data to write to the NVM * @length: length of the buffer + * @reset_level: storage for reset level required * @extack: netlink extended ACK structure * * Loop over the data for a given NVM module and program it in 4 Kb @@ -360,7 +396,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, */ static int ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, - const u8 *image, u32 length, + const u8 *image, u32 length, u8 *reset_level, struct netlink_ext_ack *extack) { struct device *dev = ice_pf_to_dev(pf); @@ -394,7 +430,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, memcpy(block, image + offset, block_size); err = ice_write_one_nvm_block(pf, module, offset, block_size, - block, last_cmd, extack); + block, last_cmd, reset_level, + extack); if (err) break; @@ -445,7 +482,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; struct devlink *devlink; - enum ice_status status; int err; dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module); @@ -456,10 +492,10 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT); - status = ice_aq_erase_nvm(hw, module, NULL); - if (status) { - dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n", - component, module, ice_stat_str(status), + err = ice_aq_erase_nvm(hw, module, NULL); + if (err) { + dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n", + component, module, err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module"); err = -EIO; @@ -511,6 +547,7 @@ out_notify_devlink: * ice_switch_flash_banks - Tell firmware to switch NVM banks * @pf: Pointer to the PF data structure * @activate_flags: flags used for the activation command + * @emp_reset_available: on return, indicates if EMP reset is available * @extack: netlink extended ACK structure * * Notify firmware to activate the newly written flash banks, and wait for the @@ -518,27 +555,43 @@ out_notify_devlink: * * Returns: zero on success or an error code on failure. 
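 *
 * Callers that do not need the EMP-reset availability hint may pass NULL,
 * as the cancel path later in this file does:
 *
 *	err = ice_switch_flash_banks(pf, pending, NULL, extack);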
*/ -static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, - struct netlink_ext_ack *extack) +static int +ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, + u8 *emp_reset_available, struct netlink_ext_ack *extack) { struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; - enum ice_status status; u16 completion_retval; + u8 response_flags; int err; memset(&event, 0, sizeof(event)); - status = ice_nvm_write_activate(hw, activate_flags); - if (status) { - dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_nvm_write_activate(hw, activate_flags, &response_flags); + if (err) { + dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks"); return -EIO; } + /* Newer versions of firmware have support to indicate whether an EMP + * reset to reload firmware is available. For older firmware, EMP + * reset is always available. + */ + if (emp_reset_available) { + if (hw->dev_caps.common_cap.reset_restrict_support) { + *emp_reset_available = response_flags & ICE_AQC_NVM_EMPR_ENA; + dev_dbg(dev, "Firmware indicated that EMP reset is %s\n", + *emp_reset_available ? + "available" : "not available"); + } else { + *emp_reset_available = ICE_AQC_NVM_EMPR_ENA; + dev_dbg(dev, "Firmware does not support restricting EMP reset availability\n"); + } + } + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ, &event); if (err) { @@ -579,6 +632,7 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) struct netlink_ext_ack *extack = priv->extack; struct ice_pf *pf = priv->pf; const char *name; + u8 *reset_level; u16 module; u8 flag; int err; @@ -587,16 +641,19 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) case NVM_COMP_ID_OROM: module = ICE_SR_1ST_OROM_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_OROM; + reset_level = NULL; name = "fw.undi"; break; case NVM_COMP_ID_NVM: module = ICE_SR_1ST_NVM_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_NVM; + reset_level = &priv->reset_level; name = "fw.mgmt"; break; case NVM_COMP_ID_NETLIST: module = ICE_SR_NETLIST_BANK_PTR; flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST; + reset_level = NULL; name = "fw.netlist"; break; default: @@ -616,7 +673,8 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) return err; return ice_write_nvm_module(pf, module, name, component->component_data, - component->component_size, extack); + component->component_size, reset_level, + extack); } /** @@ -634,110 +692,75 @@ static int ice_finalize_update(struct pldmfw *context) struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); struct netlink_ext_ack *extack = priv->extack; struct ice_pf *pf = priv->pf; + struct devlink *devlink; + int err; /* Finally, notify firmware to activate the written NVM banks */ - return ice_switch_flash_banks(pf, priv->activate_flags, extack); -} + err = ice_switch_flash_banks(pf, priv->activate_flags, + &priv->emp_reset_available, extack); + if (err) + return err; -static const struct pldmfw_ops ice_fwu_ops = { - .match_record = &pldmfw_op_pci_match_record, - .send_package_data = &ice_send_package_data, - .send_component_table = &ice_send_component_table, - .flash_component = &ice_flash_component, - .finalize_update = &ice_finalize_update, -}; + devlink = 
priv_to_devlink(pf); -/** - * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device - * @pf: private device driver structure - * @fw: firmware object pointing to the relevant firmware file - * @preservation: preservation level to request from firmware - * @extack: netlink extended ACK structure - * - * Parse the data for a given firmware file, verifying that it is a valid PLDM - * formatted image that matches this device. - * - * Extract the device record Package Data and Component Tables and send them - * to the firmware. Extract and write the flash data for each of the three - * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify - * firmware once the data is written to the inactive banks. - * - * Returns: zero on success or a negative error code on failure. - */ -int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, - u8 preservation, struct netlink_ext_ack *extack) -{ - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; - struct ice_fwu_priv priv; - enum ice_status status; - int err; + /* If the required reset is EMPR, but EMPR is disabled, report that + * a reboot is required instead. + */ + if (priv->reset_level == ICE_AQC_NVM_EMPR_FLAG && + !priv->emp_reset_available) { + dev_dbg(ice_pf_to_dev(pf), "Firmware indicated EMP reset as sufficient, but EMP reset is disabled\n"); + priv->reset_level = ICE_AQC_NVM_PERST_FLAG; + } - switch (preservation) { - case ICE_AQC_NVM_PRESERVE_ALL: - case ICE_AQC_NVM_PRESERVE_SELECTED: - case ICE_AQC_NVM_NO_PRESERVATION: - case ICE_AQC_NVM_FACTORY_DEFAULT: + switch (priv->reset_level) { + case ICE_AQC_NVM_EMPR_FLAG: + devlink_flash_update_status_notify(devlink, + "Activate new firmware by devlink reload", + NULL, 0, 0); break; + case ICE_AQC_NVM_PERST_FLAG: + devlink_flash_update_status_notify(devlink, + "Activate new firmware by rebooting the system", + NULL, 0, 0); + break; + case ICE_AQC_NVM_POR_FLAG: default: - WARN(1, "Unexpected preservation level request %u", preservation); - return -EINVAL; - } - - memset(&priv, 0, sizeof(priv)); - - priv.context.ops = &ice_fwu_ops; - priv.context.dev = dev; - priv.extack = extack; - priv.pf = pf; - priv.activate_flags = preservation; - - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; - } - - err = pldmfw_flash_image(&priv.context, fw); - if (err == -ENOENT) { - dev_err(dev, "Firmware image has no record matching this device\n"); - NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device"); - } else if (err) { - /* Do not set a generic extended ACK message here. A more - * specific message may already have been set by one of our - * ops. 
- */ - dev_err(dev, "Failed to flash PLDM image, err %d", err); + devlink_flash_update_status_notify(devlink, + "Activate new firmware by power cycling the system", + NULL, 0, 0); + break; } - ice_release_nvm(hw); + pf->fw_emp_reset_disabled = !priv->emp_reset_available; - return err; + return 0; } +static const struct pldmfw_ops ice_fwu_ops = { + .match_record = &pldmfw_op_pci_match_record, + .send_package_data = &ice_send_package_data, + .send_component_table = &ice_send_component_table, + .flash_component = &ice_flash_component, + .finalize_update = &ice_finalize_update, +}; + /** - * ice_check_for_pending_update - Check for a pending flash update + * ice_get_pending_updates - Check if the component has a pending update * @pf: the PF driver structure - * @component: if not NULL, the name of the component being updated - * @extack: Netlink extended ACK structure + * @pending: on return, bitmap of updates pending + * @extack: Netlink extended ACK * - * Check whether the device already has a pending flash update. If such an - * update is found, cancel it so that the requested update may proceed. + * Check if the device has any pending updates on any flash components. * - * Returns: zero on success, or a negative error code on failure. + * Returns: zero on success, or a negative error code on failure. Updates + * pending with the bitmap of pending updates. */ -int ice_check_for_pending_update(struct ice_pf *pf, const char *component, - struct netlink_ext_ack *extack) +int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, + struct netlink_ext_ack *extack) { - struct devlink *devlink = priv_to_devlink(pf); struct device *dev = ice_pf_to_dev(pf); struct ice_hw_dev_caps *dev_caps; struct ice_hw *hw = &pf->hw; - enum ice_status status; - u8 pending = 0; int err; dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL); @@ -749,30 +772,60 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, * may have changed, e.g. if an update was previously completed and * the system has not yet rebooted. */ - status = ice_discover_dev_caps(hw, dev_caps); - if (status) { + err = ice_discover_dev_caps(hw, dev_caps); + if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities"); kfree(dev_caps); - return -EIO; + return err; } + *pending = 0; + if (dev_caps->common_cap.nvm_update_pending_nvm) { dev_info(dev, "The fw.mgmt flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; + *pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; } if (dev_caps->common_cap.nvm_update_pending_orom) { dev_info(dev, "The fw.undi flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; + *pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; } if (dev_caps->common_cap.nvm_update_pending_netlist) { dev_info(dev, "The fw.netlist flash component has a pending update\n"); - pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; + *pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; } kfree(dev_caps); + return 0; +} + +/** + * ice_cancel_pending_update - Cancel any pending update for a component + * @pf: the PF driver structure + * @component: if not NULL, the name of the component being updated + * @extack: Netlink extended ACK structure + * + * Cancel any pending update for the specified component. If component is + * NULL, all device updates will be canceled. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +ice_cancel_pending_update(struct ice_pf *pf, const char *component, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + u8 pending; + int err; + + err = ice_get_pending_updates(pf, &pending, extack); + if (err) + return err; + /* If the flash_update request is for a specific component, ignore all * of the other components. */ @@ -798,17 +851,107 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component, "Canceling previous pending update", component, 0, 0); - status = ice_acquire_nvm(hw, ICE_RES_WRITE); - if (status) { - dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); - return -EIO; + return err; } pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV; - err = ice_switch_flash_banks(pf, pending, extack); + err = ice_switch_flash_banks(pf, pending, NULL, extack); + + ice_release_nvm(hw); + + /* Since we've canceled the pending update, we no longer know if EMP + * reset is restricted. + */ + pf->fw_emp_reset_disabled = false; + + return err; +} + +/** + * ice_devlink_flash_update - Write a firmware image to the device + * @devlink: pointer to devlink associated with the device to update + * @params: devlink flash update parameters + * @extack: netlink extended ACK structure + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the firmware. Extract and write the flash data for each of the three + * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify + * firmware once the data is written to the inactive banks. + * + * Returns: zero on success or a negative error code on failure. 
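 *
 * This callback is wired up through the driver's devlink ops (a sketch,
 * assuming the standard struct devlink_ops fields):
 *
 *	static const struct devlink_ops ice_devlink_ops = {
 *		.supported_flash_update_params =
 *			DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
 *		.flash_update = ice_devlink_flash_update,
 *	};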
+ */ +int ice_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + struct ice_fwu_priv priv; + u8 preservation; + int err; + + if (!params->overwrite_mask) { + /* preserve all settings and identifiers */ + preservation = ICE_AQC_NVM_PRESERVE_ALL; + } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) { + /* overwrite settings, but preserve the vital device identifiers */ + preservation = ICE_AQC_NVM_PRESERVE_SELECTED; + } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS | + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) { + /* overwrite both settings and identifiers, preserve nothing */ + preservation = ICE_AQC_NVM_NO_PRESERVATION; + } else { + NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported"); + return -EOPNOTSUPP; + } + + if (!hw->dev_caps.common_cap.nvm_unified_update) { + NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); + return -EOPNOTSUPP; + } + + memset(&priv, 0, sizeof(priv)); + + priv.context.ops = &ice_fwu_ops; + priv.context.dev = dev; + priv.extack = extack; + priv.pf = pf; + priv.activate_flags = preservation; + + devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0); + + err = ice_cancel_pending_update(pf, NULL, extack); + if (err) + return err; + + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) { + dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); + return err; + } + + err = pldmfw_flash_image(&priv.context, params->fw); + if (err == -ENOENT) { + dev_err(dev, "Firmware image has no record matching this device\n"); + NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device"); + } else if (err) { + /* Do not set a generic extended ACK message here. A more + * specific message may already have been set by one of our + * ops. 
+ */ + dev_err(dev, "Failed to flash PLDM image, err %d", err); + } ice_release_nvm(hw); diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h index c6390f6851ff..750574885716 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.h +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h @@ -4,9 +4,10 @@ #ifndef _ICE_FW_UPDATE_H_ #define _ICE_FW_UPDATE_H_ -int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, - u8 preservation, struct netlink_ext_ack *extack); -int ice_check_for_pending_update(struct ice_pf *pf, const char *component, - struct netlink_ext_ack *extack); +int ice_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack); +int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, + struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index a49082485642..d16738a3d3a7 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -100,6 +100,7 @@ #define PF_SB_ATQT 0x0022FE00 #define PF_SB_ATQT_ATQT_S 0 #define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0) +#define PF_SB_REM_DEV_CTL 0x002300F0 #define PRTDCB_GENC 0x00083000 #define PRTDCB_GENC_PFCLDA_S 16 #define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16) @@ -440,6 +441,10 @@ #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) #define PRTRPB_RDPC 0x000AC260 +#define GLHH_ART_CTL 0x000A41D4 +#define GLHH_ART_CTL_ACTIVE_M BIT(0) +#define GLHH_ART_TIME_H 0x000A41D8 +#define GLHH_ART_TIME_L 0x000A41DC #define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4)) #define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4) #define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4)) @@ -452,6 +457,8 @@ #define GLTSYN_ENA_TSYN_ENA_M BIT(0) #define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4)) #define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4)) +#define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4)) +#define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4)) #define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4)) #define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4)) #define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4)) @@ -468,6 +475,8 @@ #define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4)) #define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4)) #define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4)) +#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */ +#define PFHH_SEM_BUSY_M BIT(0) #define PFTSYN_SEM 0x00088880 #define PFTSYN_SEM_BUSY_M BIT(0) #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index adcc9a251595..fc3580167e7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -288,7 +288,7 @@ int ice_plug_aux_dev(struct ice_pf *pf) adev->id = pf->aux_idx; adev->dev.release = ice_adev_release; adev->dev.parent = &pf->pdev->dev; - adev->name = IIDC_RDMA_ROCE_NAME; + adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? 
"roce" : "iwarp"; ret = auxiliary_device_init(adev); if (ret) { @@ -335,6 +335,6 @@ int ice_init_rdma(struct ice_pf *pf) dev_err(dev, "failed to reserve vectors for RDMA\n"); return ret; } - + pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; return ice_plug_aux_dev(pf); } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 40562600a8cf..0c187cf04fcf 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -89,8 +89,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rx_rings) goto err_rings; - /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ - vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq), + /* txq_map needs to have enough space to track both Tx (stack) rings + * and XDP rings; at this point vsi->num_xdp_txq might not be set, + * so use num_possible_cpus() as we want to always provide XDP ring + * per CPU, regardless of queue count settings from user that might + * have come from ethtool's set_channels() callback; + */ + vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()), sizeof(*vsi->txq_map), GFP_KERNEL); if (!vsi->txq_map) @@ -286,7 +291,7 @@ void ice_vsi_delete(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctxt; - enum ice_status status; + int status; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -300,8 +305,8 @@ void ice_vsi_delete(struct ice_vsi *vsi) status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) - dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", + vsi->vsi_num, status); kfree(ctxt); } @@ -565,10 +570,16 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; u32 g_val, b_val; - /* Flow Director filters are only allocated/assigned to the PF VSI which - * passes the traffic. The CTRL VSI is only used to add/delete filters - * so we don't allocate resources to it + /* Flow Director filters are only allocated/assigned to the PF VSI or + * CHNL VSI which passes the traffic. The CTRL VSI is only used to + * add/delete filters so resources are not allocated to it */ + if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) + return -EPERM; + + if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF || + vsi->type == ICE_VSI_CHNL)) + return -EPERM; /* FD filters from guaranteed pool per VSI */ g_val = pf->hw.func_caps.fd_fltr_guar; @@ -580,19 +591,56 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi) if (!b_val) return -EPERM; - if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF)) - return -EPERM; + /* PF main VSI gets only 64 FD resources from guaranteed pool + * when ADQ is configured. + */ +#define ICE_PF_VSI_GFLTR 64 - if (!test_bit(ICE_FLAG_FD_ENA, pf->flags)) - return -EPERM; + /* determine FD filter resources per VSI from shared(best effort) and + * dedicated pool + */ + if (vsi->type == ICE_VSI_PF) { + vsi->num_gfltr = g_val; + /* if MQPRIO is configured, main VSI doesn't get all FD + * resources from guaranteed pool. 
PF VSI gets 64 FD resources + */ + if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { + if (g_val < ICE_PF_VSI_GFLTR) + return -EPERM; + /* allow bare minimum entries for PF VSI */ + vsi->num_gfltr = ICE_PF_VSI_GFLTR; + } - vsi->num_gfltr = g_val / pf->num_alloc_vsi; + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + } else if (vsi->type == ICE_VSI_VF) { + vsi->num_gfltr = 0; - /* each VSI gets same "best_effort" quota */ - vsi->num_bfltr = b_val; + /* each VSI gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + } else { + struct ice_vsi *main_vsi; + int numtc; - if (vsi->type == ICE_VSI_VF) { - vsi->num_gfltr = 0; + main_vsi = ice_get_main_vsi(pf); + if (!main_vsi) + return -EPERM; + + if (!main_vsi->all_numtc) + return -EINVAL; + + /* figure out ADQ numtc */ + numtc = main_vsi->all_numtc - ICE_CHNL_START_TC; + + /* only one TC but still asking resources for channels, + * invalid config + */ + if (numtc < ICE_CHNL_START_TC) + return -EPERM; + + g_val -= ICE_PF_VSI_GFLTR; + /* channel VSIs gets equal share from guaranteed pool */ + vsi->num_gfltr = g_val / numtc; /* each VSI gets same "best_effort" quota */ vsi->num_bfltr = b_val; @@ -704,15 +752,15 @@ bool ice_is_aux_ena(struct ice_pf *pf) static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; + int status; if (ice_is_safe_mode(pf)) return; status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); if (status) - dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } /** @@ -937,7 +985,7 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) u16 dflt_q, report_q, val; if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL && - vsi->type != ICE_VSI_VF) + vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL) return; val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; @@ -1540,8 +1588,8 @@ ice_vsi_cfg_rss_exit: static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1552,8 +1600,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA); if (status) - dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n", - vsi->vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n", + vsi->vsi_num, status); } /** @@ -1572,8 +1620,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { @@ -1585,57 +1633,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for IPv6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 
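The reworked ice_alloc_fd_res() above splits the guaranteed flow-director pool three ways: once MQPRIO/ADQ is active the PF VSI keeps a fixed 64-entry floor, VF VSIs take none, and channel VSIs divide the remainder equally. A standalone sketch of the channel-VSI share, assuming numtc is already the ADQ traffic-class count (all_numtc - ICE_CHNL_START_TC in the driver); the error value stands in for -EPERM:

#define ICE_PF_VSI_GFLTR	64	/* PF VSI floor under MQPRIO, per the hunk */

static int chnl_vsi_gfltr(int guaranteed, int numtc)
{
	if (numtc < 1 || guaranteed < ICE_PF_VSI_GFLTR)
		return -1;	/* the driver returns -EPERM here */
	/* channel VSIs share what the PF floor leaves over, equally */
	return (guaranteed - ICE_PF_VSI_GFLTR) / numtc;
}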
flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for sctp4 with input set IP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n", + vsi_num, status); /* configure RSS for sctp6 with input set IPv6 src/dst */ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6); if (status) - dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n", - vsi_num, ice_stat_str(status)); + dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n", + vsi_num, status); } /** @@ -1744,22 +1792,21 @@ ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action) int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; - int err = 0; + int err; dev = ice_pf_to_dev(pf); - status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); - if (!status) { + err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI); + if (!err) { vsi->num_vlan--; - } else if (status == ICE_ERR_DOES_NOT_EXIST) { - dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n", - vid, vsi->vsi_num, ice_stat_str(status)); + } else if (err == -ENOENT) { + dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n", + vid, vsi->vsi_num, err); + err = 0; } else { - dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n", - vid, vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n", + vid, vsi->vsi_num, 
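The run of ice_add_rss_cfg() calls above (IPv4 and IPv6, then TCP/UDP/SCTP over each) varies only in the hash-field/header-flag pair per flow type, so the shape reduces to a table walk. An illustrative standalone sketch; add_rss and the struct are hypothetical names, and the driver itself keeps the calls unrolled:

#include <stdio.h>

struct rss_flow {
	const char *name;
	unsigned long long hash_fields;	/* e.g. ICE_FLOW_HASH_IPV4 */
	unsigned int hdr_flags;		/* e.g. ICE_FLOW_SEG_HDR_TCP | ..._IPV4 */
};

/* hypothetical stand-in for ice_add_rss_cfg() */
static int add_rss(const struct rss_flow *flow)
{
	(void)flow;
	return 0;
}

static void cfg_rss_flows(const struct rss_flow *flows, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		int err = add_rss(&flows[i]);

		/* log and keep going, as the driver does */
		if (err)
			fprintf(stderr, "rss cfg failed for %s: %d\n",
				flows[i].name, err);
	}
}

Failures are logged and skipped rather than propagated, matching the dev_dbg-only handling in the hunk above.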
err); } return err; @@ -2100,8 +2147,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -2119,12 +2165,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -2143,8 +2187,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) { struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; /* do not allow modifying VLAN stripping when a port VLAN is configured * on this VSI @@ -2172,12 +2215,10 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n", - ena, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n", + ena, ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -2319,10 +2360,9 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL); if (status) { - netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n", + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n", ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, - ice_stat_str(status), - ice_aq_str(pf->hw.adminq.sq_last_status)); + status, ice_aq_str(pf->hw.adminq.sq_last_status)); goto err_out; } @@ -2400,11 +2440,11 @@ clear_reg_idx: */ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { - enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, - enum ice_sw_fwd_act_type act); + int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, + enum ice_sw_fwd_act_type act); struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; dev = ice_pf_to_dev(pf); eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth; @@ -2423,9 +2463,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) } if (status) - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", + dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", create ? "adding" : "removing", tx ? 
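ice_vsi_kill_vlan() above now folds the filter-not-found case into success: with enum ice_status gone, -ENOENT is logged at debug level and rewritten to 0, so a double delete is idempotent. The shape in isolation (fltr_remove is a hypothetical callback):

#include <errno.h>

static int kill_vlan(int (*fltr_remove)(unsigned short vid), unsigned short vid)
{
	int err = fltr_remove(vid);

	if (err == -ENOENT)	/* filter already gone: not an error */
		return 0;
	return err;		/* anything else propagates */
}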
"TX" : "RX", - vsi->vsi_num, ice_stat_str(status)); + vsi->vsi_num, status); } /** @@ -2445,7 +2485,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) struct ice_port_info *port_info; struct ice_pf *pf = vsi->back; u32 agg_node_id_start = 0; - enum ice_status status; + int status; /* create (as needed) scheduler aggregator node and move VSI into * corresponding aggregator node @@ -2571,7 +2611,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; struct ice_vsi *vsi; int ret, i; @@ -2720,11 +2759,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, } dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc); - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, - max_txqs); - if (status) { - dev_err(dev, "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, + max_txqs); + if (ret) { + dev_err(dev, "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); goto unroll_clear_rings; } @@ -3031,8 +3070,8 @@ void ice_napi_del(struct ice_vsi *vsi) */ int ice_vsi_release(struct ice_vsi *vsi) { - enum ice_status err; struct ice_pf *pf; + int err; if (!vsi->back) return -ENODEV; @@ -3268,7 +3307,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) int prev_num_q_vectors = 0; struct ice_vf *vf = NULL; enum ice_vsi_type vtype; - enum ice_status status; struct ice_pf *pf; int ret, i; @@ -3416,14 +3454,14 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) /* If MQPRIO is set, means channel code path, hence for main * VSI's, use TC as 1 */ - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); - if (status) { - dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); + if (ret) { + dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", + vsi->vsi_num, ret); if (init_vsi) { ret = -EIO; goto err_vectors; @@ -3658,7 +3696,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; struct ice_vsi_ctx *ctx; - enum ice_status status; struct device *dev; int i, ret = 0; u8 num_tc = 0; @@ -3700,25 +3737,22 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) /* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); - status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); - if (status) { + ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); + if (ret) { dev_info(dev, "Failed VSI Update\n"); - ret = -EIO; goto out; } if (vsi->type == ICE_VSI_PF && test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, - max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs); else - status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, - vsi->tc_cfg.ena_tc, max_txqs); + ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, + vsi->tc_cfg.ena_tc, max_txqs); - if (status) { - dev_err(dev, "VSI %d failed TC config, error %s\n", - vsi->vsi_num, ice_stat_str(status)); - ret = -EIO; + if (ret) { + dev_err(dev, "VSI %d failed TC 
config, error %d\n", + vsi->vsi_num, ret); goto out; } ice_vsi_update_q_map(vsi, ctx); @@ -3771,39 +3805,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes) } /** - * ice_status_to_errno - convert from enum ice_status to Linux errno - * @err: ice_status value to convert - */ -int ice_status_to_errno(enum ice_status err) -{ - switch (err) { - case ICE_SUCCESS: - return 0; - case ICE_ERR_DOES_NOT_EXIST: - return -ENOENT; - case ICE_ERR_OUT_OF_RANGE: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_EMPTY: - case ICE_ERR_AQ_FW_CRITICAL: - return -EIO; - case ICE_ERR_PARAM: - case ICE_ERR_INVAL_SIZE: - return -EINVAL; - case ICE_ERR_NO_MEMORY: - return -ENOMEM; - case ICE_ERR_MAX_LIMIT: - return -EAGAIN; - case ICE_ERR_RESET_ONGOING: - return -EBUSY; - case ICE_ERR_AQ_FULL: - return -ENOSPC; - default: - return -EINVAL; - } -} - -/** * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used * @sw: switch to check if its default forwarding VSI is free * @@ -3844,8 +3845,8 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) */ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) { - enum ice_status status; struct device *dev; + int status; if (!sw || !vsi) return -EINVAL; @@ -3868,9 +3869,9 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n", - vsi->vsi_num, ice_stat_str(status)); - return -EIO; + dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n", + vsi->vsi_num, status); + return status; } sw->dflt_vsi = vsi; @@ -3890,8 +3891,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi) int ice_clear_dflt_vsi(struct ice_sw *sw) { struct ice_vsi *dflt_vsi; - enum ice_status status; struct device *dev; + int status; if (!sw) return -EINVAL; @@ -3907,8 +3908,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw) status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false, ICE_FLTR_RX); if (status) { - dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n", - dflt_vsi->vsi_num, ice_stat_str(status)); + dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n", + dflt_vsi->vsi_num, status); return -EIO; } @@ -3982,8 +3983,8 @@ int ice_get_link_speed_kbps(struct ice_vsi *vsi) int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4009,7 +4010,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n", min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n", @@ -4021,7 +4022,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) if (status) { dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n", @@ -4043,8 +4044,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate) int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; int speed; dev = ice_pf_to_dev(pf); @@ -4070,7 +4071,7 @@ int 
ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n", max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n", @@ -4082,7 +4083,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate) if (status) { dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n", ice_vsi_type_str(vsi->type), vsi->idx); - return -EIO; + return status; } dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n", @@ -4102,7 +4103,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) struct device *dev = ice_pf_to_dev(vsi->back); struct ice_port_info *pi = vsi->port_info; struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; if (vsi->type != ICE_VSI_PF) return -EINVAL; @@ -4114,16 +4115,16 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) * a success code. Return an error if FW returns an error code other * than ICE_AQ_RC_EMODE */ - if (status == ICE_ERR_AQ_ERROR) { + if (status == -EIO) { if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) - dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n", + (ena ? "ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); } else if (status) { - dev_err(dev, "can't set link to %s, err %s aq_err %s\n", - (ena ? "ON" : "OFF"), ice_stat_str(status), + dev_err(dev, "can't set link to %s, err %d aq_err %s\n", + (ena ? "ON" : "OFF"), status, ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; + return status; } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 6c803407b67d..b2ed189527d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -103,15 +103,11 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes); void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes); void ice_vsi_cfg_frame_size(struct ice_vsi *vsi); - -int ice_status_to_errno(enum ice_status err); - void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); -enum ice_status -ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); +int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_aux_ena(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f099797f35e3..30814435f779 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -155,7 +155,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) */ static int ice_init_mac_fltr(struct ice_pf *pf) { - enum ice_status status; struct ice_vsi *vsi; u8 *perm_addr; @@ -164,11 +163,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf) return -EINVAL; perm_addr = vsi->port_info->mac.perm_addr; - status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); - if (status) - return -EIO; - - return 0; + return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); } /** @@ -237,36 +232,43 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) } /** - * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF 
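The hunk that follows replaces ice_cfg_promisc(vsi, mask, set) with a dedicated ice_set_promisc()/ice_clear_promisc() pair, so the boolean selector disappears from every call site. The refactor pattern in miniature, with hypothetical stand-ins for the underlying ice_fltr_* helpers:

/* hypothetical hardware helpers standing in for the ice_fltr_* calls */
static int hw_promisc_set(unsigned char mask)   { (void)mask; return 0; }
static int hw_promisc_clear(unsigned char mask) { (void)mask; return 0; }

/* before: one entry point, behaviour keyed off a boolean argument */
static int cfg_promisc(unsigned char mask, int set)
{
	return set ? hw_promisc_set(mask) : hw_promisc_clear(mask);
}

/* after: each caller states its intent in the function name */
static int set_promisc(unsigned char mask)   { return hw_promisc_set(mask); }
static int clear_promisc(unsigned char mask) { return hw_promisc_clear(mask); }

Dropping the flag removes a branch from each call site and lets the set and clear paths pick the VLAN-aware helper variants independently, as the new functions do.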
+ * ice_set_promisc - Enable promiscuous mode for a given PF * @vsi: the VSI being configured * @promisc_m: mask of promiscuous config bits - * @set_promisc: enable or disable promisc flag request * */ -static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) +static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m) { - struct ice_hw *hw = &vsi->back->hw; - enum ice_status status = 0; + int status; if (vsi->type != ICE_VSI_PF) return 0; - if (vsi->num_vlan > 1) { - status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, - set_promisc); - } else { - if (set_promisc) - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - else - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - } + if (vsi->num_vlan > 1) + status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); + else + status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + return status; +} - if (status) - return -EIO; +/** + * ice_clear_promisc - Disable promiscuous mode for a given PF + * @vsi: the VSI being configured + * @promisc_m: mask of promiscuous config bits + * + */ +static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) +{ + int status; - return 0; + if (vsi->type != ICE_VSI_PF) + return 0; + + if (vsi->num_vlan > 1) + status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m); + else + status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0); + return status; } /** @@ -282,10 +284,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) bool promisc_forced_on = false; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - enum ice_status status = 0; u32 changed_flags = 0; u8 promisc_m; - int err = 0; + int err; if (!vsi->netdev) return -EINVAL; @@ -315,25 +316,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } /* Remove MAC addresses in the unsync list */ - status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); + err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); ice_fltr_free_list(dev, &vsi->tmp_unsync_list); - if (status) { + if (err) { netdev_err(netdev, "Failed to delete MAC filters\n"); /* if we failed because of alloc failures, just bail */ - if (status == ICE_ERR_NO_MEMORY) { - err = -ENOMEM; + if (err == -ENOMEM) goto out; - } } /* Add MAC addresses in the sync list */ - status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); + err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); ice_fltr_free_list(dev, &vsi->tmp_sync_list); /* If filter is added successfully or already exists, do not go into * 'if' condition and report it as error. Instead continue processing * rest of the function. */ - if (status && status != ICE_ERR_ALREADY_EXISTS) { + if (err && err != -EEXIST) { netdev_err(netdev, "Failed to add MAC filters\n"); /* If there is no more space for new umac filters, VSI * should go into promiscuous mode. 
There should be some @@ -346,10 +345,10 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", vsi->vsi_num); } else { - err = -EIO; goto out; } } + err = 0; /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { @@ -358,7 +357,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) else promisc_m = ICE_MCAST_PROMISC_BITS; - err = ice_cfg_promisc(vsi, promisc_m, true); + err = ice_set_promisc(vsi, promisc_m); if (err) { netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", vsi->vsi_num); @@ -372,7 +371,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) else promisc_m = ICE_MCAST_PROMISC_BITS; - err = ice_cfg_promisc(vsi, promisc_m, false); + err = ice_clear_promisc(vsi, promisc_m); if (err) { netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", vsi->vsi_num); @@ -396,6 +395,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) ~IFF_PROMISC; goto out_promisc; } + err = 0; ice_cfg_vlan_pruning(vsi, false); } } else { @@ -472,6 +472,25 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) } /** + * ice_clear_sw_switch_recipes - clear switch recipes + * @pf: board private structure + * + * Mark switch recipes as not created in sw structures. There are cases where + * rules (especially advanced rules) need to be restored, either re-read from + * hardware or added again. For example after the reset. 'recp_created' flag + * prevents from doing that and need to be cleared upfront. + */ +static void ice_clear_sw_switch_recipes(struct ice_pf *pf) +{ + struct ice_sw_recipe *recp; + u8 i; + + recp = pf->hw.switch_info->recp_list; + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) + recp[i].recp_created = false; +} + +/** * ice_prepare_for_reset - prep for reset * @pf: board private structure * @reset_type: reset type requested @@ -501,6 +520,11 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) ice_for_each_vf(pf, i) ice_set_vf_state_qs_dis(&pf->vf[i]); + if (ice_is_eswitch_mode_switchdev(pf)) { + if (reset_type != ICE_RESET_PFR) + ice_clear_sw_switch_recipes(pf); + } + /* release ADQ specific HW and SW resources */ vsi = ice_get_main_vsi(pf); if (!vsi) @@ -539,7 +563,7 @@ skip: ice_pf_dis_all_vsi(pf, false); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_release(pf); + ice_ptp_prepare_for_reset(pf); if (hw->port_info) ice_sched_clear_port(hw->port_info); @@ -695,12 +719,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) { struct ice_aqc_get_phy_caps_data *caps; const char *an_advertised; - enum ice_status status; const char *fec_req; const char *speed; const char *fec; const char *fc; const char *an; + int status; if (!vsi) return; @@ -1020,10 +1044,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, { struct device *dev = ice_pf_to_dev(pf); struct ice_phy_info *phy_info; - enum ice_status status; struct ice_vsi *vsi; u16 old_link_speed; bool old_link; + int status; phy_info = &pi->phy; phy_info->link_info_old = phy_info->link_info; @@ -1036,8 +1060,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, */ status = ice_update_link_info(pi); if (status) - dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n", - pi->lport, ice_stat_str(status), + dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n", + pi->lport, status, 
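ice_vsi_sync_fltr() above degrades gracefully when unicast filter space runs out: -EEXIST on add is treated as success, and hitting the filter limit forces promiscuous mode on instead of failing the sync. A sketch of that policy with hypothetical callbacks; using -ENOSPC as the trigger is an assumption here, since the hunk elides the exact condition it tests:

#include <errno.h>

static int sync_uc_mac(int (*add_fltr)(void), int (*force_promisc)(void))
{
	int err = add_fltr();

	if (!err || err == -EEXIST)
		return 0;		/* added, or already present: fine */
	if (err == -ENOSPC)		/* assumption: table-full indicator */
		return force_promisc();	/* degrade to promiscuous mode */
	return err;			/* anything else is a real failure */
}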
ice_aq_str(pi->hw->adminq.sq_last_status)); ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); @@ -1063,6 +1087,9 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return 0; + if (!ice_is_e810(&pf->hw)) + ice_ptp_link_change(pf, pf->hw.pf_id, link_up); + if (ice_is_dcb_active(pf)) { if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) ice_dcb_rebuild(pf); @@ -1407,15 +1434,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) return 0; do { - enum ice_status ret; u16 opcode; + int ret; ret = ice_clean_rq_elem(hw, cq, &event, &pending); - if (ret == ICE_ERR_AQ_NO_WORK) + if (ret == -EALREADY) break; if (ret) { - dev_err(dev, "%s Receive Queue event error %s\n", qtype, - ice_stat_str(ret)); + dev_err(dev, "%s Receive Queue event error %d\n", qtype, + ret); break; } @@ -1866,19 +1893,17 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data *pcaps; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, - NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, + pcaps, NULL); - if (status) { + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto out; } @@ -1977,8 +2002,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) struct ice_aqc_get_phy_caps_data *pcaps; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = pi->hw->back; - enum ice_status status; - int err = 0; + int err; if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) return -EIO; @@ -1988,14 +2012,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) return -ENOMEM; if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); - err = -EIO; goto err_out; } @@ -2049,8 +2072,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) struct ice_aqc_set_phy_cfg_data *cfg; struct ice_phy_info *phy = &pi->phy; struct ice_pf *pf = vsi->back; - enum ice_status status; - int err = 0; + int err; /* Ensure we have media as we cannot configure a medialess port */ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) @@ -2070,12 +2092,11 @@ static int ice_configure_phy(struct ice_vsi *vsi) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, - NULL); - if (status) { - dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, + NULL); + if (err) { + dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", + vsi->vsi_num, err); goto done; } @@ -2089,15 +2110,14 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Use PHY topology as baseline for configuration */ memset(pcaps, 0, sizeof(*pcaps)); if (ice_fw_supports_report_dflt_cfg(pi->hw)) - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, - 
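ice_init_phy_user_cfg() and ice_configure_phy() around this point repeat one selection pattern: ask the firmware for its default configuration when it is new enough to report one, otherwise fall back to topology caps with media. A compact sketch, with illustrative stand-ins for the ICE_AQC_REPORT_* modes and the AQ call:

enum report_mode {
	REPORT_DFLT_CFG,	/* stand-in for ICE_AQC_REPORT_DFLT_CFG */
	REPORT_TOPO_CAP_MEDIA	/* stand-in for ICE_AQC_REPORT_TOPO_CAP_MEDIA */
};

/* stub for ice_aq_get_phy_caps() */
static int aq_get_phy_caps(enum report_mode mode) { (void)mode; return 0; }

static int get_phy_caps(int fw_supports_dflt_cfg)
{
	/* newer firmware reports its default config directly; older
	 * firmware only exposes topology capabilities with media */
	return aq_get_phy_caps(fw_supports_dflt_cfg ?
			       REPORT_DFLT_CFG : REPORT_TOPO_CAP_MEDIA);
}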
pcaps, NULL); + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, + pcaps, NULL); else - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, - pcaps, NULL); - if (status) { - dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; + err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + pcaps, NULL); + if (err) { + dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", + vsi->vsi_num, err); goto done; } @@ -2150,12 +2170,10 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Enable link and link update */ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; - status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); - if (status) { - dev_err(dev, "Failed to set phy config, VSI %d error %s\n", - vsi->vsi_num, ice_stat_str(status)); - err = -EIO; - } + err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); + if (err) + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, err); kfree(cfg); done: @@ -2549,9 +2567,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .vsi_map_offset = vsi->alloc_txq, .mapping_mode = ICE_VSI_MAP_CONTIG }; - enum ice_status status; struct device *dev; int i, v_idx; + int status; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -2605,11 +2623,22 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", - ice_stat_str(status)); + dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", + status); goto clear_xdp_rings; } - ice_vsi_assign_bpf_prog(vsi, prog); + + /* assign the prog only when it's not already present on VSI; + * this flow is a subject of both ethtool -L and ndo_bpf flows; + * VSI rebuild that happens under ethtool -L can expose us to + * the bpf_prog refcount issues as we would be swapping same + * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put + * on it as it would be treated as an 'old_prog'; for ndo_bpf + * this is not harmful as dev_xdp_install bumps the refcount + * before calling the op exposed by the driver; + */ + if (!ice_is_xdp_ena_vsi(vsi)) + ice_vsi_assign_bpf_prog(vsi, prog); return 0; clear_xdp_rings: @@ -2785,6 +2814,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); } else { + /* safe to call even when prog == vsi->xdp_prog as + * dev_xdp_install in net/core/dev.c incremented prog's + * refcount so corresponding bpf_prog_put won't cause + * underflow + */ ice_vsi_assign_bpf_prog(vsi, prog); } @@ -3504,7 +3538,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi; - int status = 0; + int status; if (ice_is_reset_in_progress(pf->state)) return -EBUSY; @@ -3517,10 +3551,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf) INIT_LIST_HEAD(&vsi->ch_list); status = ice_cfg_netdev(vsi); - if (status) { - status = -ENODEV; + if (status) goto unroll_vsi_setup; - } /* netdev has to be configured before setting frame size */ ice_vsi_cfg_frame_size(vsi); @@ -3544,14 +3576,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf) if (status) { dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n", vsi->vsi_num, status); - status = -EINVAL; goto unroll_napi_add; } status = ice_init_mac_fltr(pf); if (status) goto 
free_cpu_rx_map; - return status; + return 0; free_cpu_rx_map: ice_free_cpu_rx_rmap(vsi); @@ -4007,8 +4038,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); struct ice_vsi_ctx *ctxt; - enum ice_status status; struct ice_hw *hw; + int status; if (!vsi) return; @@ -4038,9 +4069,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } else { vsi->info.sec_flags = ctxt->info.sec_flags; vsi->info.sw_flags2 = ctxt->info.sw_flags2; @@ -4053,108 +4083,80 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) /** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info - * @status: status of package load + * @state: state of package load */ -static void -ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) +static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) { - struct ice_pf *pf = (struct ice_pf *)hw->back; - struct device *dev = ice_pf_to_dev(pf); + struct ice_pf *pf = hw->back; + struct device *dev; - switch (*status) { - case ICE_SUCCESS: - /* The package download AdminQ command returned success because - * this download succeeded or ICE_ERR_AQ_NO_WORK since there is - * already a package loaded on the device. - */ - if (hw->pkg_ver.major == hw->active_pkg_ver.major && - hw->pkg_ver.minor == hw->active_pkg_ver.minor && - hw->pkg_ver.update == hw->active_pkg_ver.update && - hw->pkg_ver.draft == hw->active_pkg_ver.draft && - !memcmp(hw->pkg_name, hw->active_pkg_name, - sizeof(hw->pkg_name))) { - if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) - dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - else - dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft); - } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || - hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { - dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); - *status = ICE_ERR_NOT_SUPPORTED; - } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { - dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. 
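ice_setup_pf_sw() above uses the kernel's usual unwind ladder: each setup step that fails jumps to a label that tears down everything already done, in reverse order. The skeleton of that pattern, standalone, with stub steps:

/* stubs for the setup steps and their undo operations */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_a(void) { }
static void undo_b(void) { }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;		/* nothing to unwind yet */

	err = step_b();
	if (err)
		goto unroll_a;

	err = step_c();
	if (err)
		goto unroll_b;

	return 0;

unroll_b:
	undo_b();
unroll_a:
	undo_a();
	return err;
}

The probe hunk further on extends the same ladder with err_devlink_reg_param when ice_devlink_register_params() is inserted into the sequence.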
The package file found by the driver: '%s' version %d.%d.%d.%d.\n", - hw->active_pkg_name, - hw->active_pkg_ver.major, - hw->active_pkg_ver.minor, - hw->active_pkg_ver.update, - hw->active_pkg_ver.draft, - hw->pkg_name, - hw->pkg_ver.major, - hw->pkg_ver.minor, - hw->pkg_ver.update, - hw->pkg_ver.draft); - } else { - dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); - *status = ICE_ERR_NOT_SUPPORTED; - } + dev = ice_pf_to_dev(pf); + + switch (state) { + case ICE_DDP_PKG_SUCCESS: + dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + break; + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft); + break; + case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: + dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", + hw->active_pkg_name, + hw->active_pkg_ver.major, + hw->active_pkg_ver.minor, + hw->active_pkg_ver.update, + hw->active_pkg_ver.draft, + hw->pkg_name, + hw->pkg_ver.major, + hw->pkg_ver.minor, + hw->pkg_ver.update, + hw->pkg_ver.draft); break; - case ICE_ERR_FW_DDP_MISMATCH: + case ICE_DDP_PKG_FW_MISMATCH: dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); break; - case ICE_ERR_BUF_TOO_SHORT: - case ICE_ERR_CFG: + case ICE_DDP_PKG_INVALID_FILE: dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); break; - case ICE_ERR_NOT_SUPPORTED: - /* Package File version not supported */ - if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); - else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || - (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && - hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) - dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: + dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); break; - case ICE_ERR_AQ_ERROR: - switch (hw->pkg_dwnld_status) { - case ICE_AQ_RC_ENOSEC: - case ICE_AQ_RC_EBADSIG: - dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. 
Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_ESVN: - dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); - return; - case ICE_AQ_RC_EBADMAN: - case ICE_AQ_RC_EBADBUF: - dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); - /* poll for reset to complete */ - if (ice_check_reset(hw)) - dev_err(dev, "Error resetting device. Please reload the driver\n"); - return; - default: - break; - } - fallthrough; + case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: + dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: + dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: + dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_LOAD_ERROR: + dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); + /* poll for reset to complete */ + if (ice_check_reset(hw)) + dev_err(dev, "Error resetting device. Please reload the driver\n"); + break; + case ICE_DDP_PKG_ERR: default: - dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", - *status); + dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); break; } } @@ -4170,24 +4172,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) static void ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) { - enum ice_status status = ICE_ERR_PARAM; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; /* Load DDP Package */ if (firmware && !hw->pkg_copy) { - status = ice_copy_and_init_pkg(hw, firmware->data, - firmware->size); - ice_log_pkg_init(hw, &status); + state = ice_copy_and_init_pkg(hw, firmware->data, + firmware->size); + ice_log_pkg_init(hw, state); } else if (!firmware && hw->pkg_copy) { /* Reload package during rebuild after CORER/GLOBR reset */ - status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); - ice_log_pkg_init(hw, &status); + state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + ice_log_pkg_init(hw, state); } else { dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); } - if (status) { + if (!ice_is_init_pkg_successful(state)) { /* Safe Mode */ clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); return; @@ -4218,9 +4220,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) * ice_send_version - update firmware with driver version * @pf: PF struct * - * Returns ICE_SUCCESS on success, else error code + * Returns 0 on success, else error code */ -static enum ice_status ice_send_version(struct ice_pf *pf) +static int ice_send_version(struct ice_pf *pf) { struct ice_driver_ver dv; @@ -4510,7 +4512,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) * true */ if (ice_is_safe_mode(pf)) { - dev_err(dev, "Package download failed. 
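The rewritten ice_log_pkg_init() above switches on a pre-classified enum ice_ddp_state instead of re-deriving conditions from version fields at log time. The too-high/too-low cases that used to be computed inline reduce to this comparison, shown here standalone; the supported-version arguments stand in for ICE_PKG_SUPP_VER_MAJ/MNR:

enum ddp_state {
	DDP_PKG_OK,
	DDP_PKG_FILE_VERSION_TOO_HIGH,
	DDP_PKG_FILE_VERSION_TOO_LOW
};

static enum ddp_state classify_pkg_version(unsigned int maj, unsigned int min,
					   unsigned int supp_maj,
					   unsigned int supp_min)
{
	if (maj > supp_maj || (maj == supp_maj && min > supp_min))
		return DDP_PKG_FILE_VERSION_TOO_HIGH;	/* needs a newer driver */
	if (maj < supp_maj || (maj == supp_maj && min < supp_min))
		return DDP_PKG_FILE_VERSION_TOO_LOW;	/* needs a newer DDP file */
	return DDP_PKG_OK;
}

Doing the classification once, where the package is parsed, leaves the log function as a flat switch over states, which is what the hunk above shows.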
Advanced features disabled - Device now in Safe Mode\n"); /* we already got function/device capabilities but these don't * reflect what the driver needs to do in safe mode. Instead of * adding conditional logic everywhere to ignore these @@ -4705,6 +4706,10 @@ probe_done: if (err) goto err_netdev_reg; + err = ice_devlink_register_params(pf); + if (err) + goto err_netdev_reg; + /* ready to go, so clear down state bit */ clear_bit(ICE_DOWN, pf->state); if (ice_is_aux_ena(pf)) { @@ -4712,7 +4717,7 @@ probe_done: if (pf->aux_idx < 0) { dev_err(dev, "Failed to allocate device ID for AUX driver\n"); err = -ENOMEM; - goto err_netdev_reg; + goto err_devlink_reg_param; } err = ice_init_rdma(pf); @@ -4731,6 +4736,8 @@ probe_done: err_init_aux_unroll: pf->adev = NULL; ida_free(&ice_aux_ida, pf->aux_idx); +err_devlink_reg_param: + ice_devlink_unregister_params(pf); err_netdev_reg: err_send_version_unroll: ice_vsi_release_all(pf); @@ -4787,9 +4794,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status status; u8 mac_addr[ETH_ALEN]; struct ice_vsi *vsi; + int status; u8 flags; if (!pf->wol_ena) @@ -4811,9 +4818,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf) status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); if (status) - dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); } /** @@ -4845,6 +4851,7 @@ static void ice_remove(struct pci_dev *pdev) ice_unplug_aux_dev(pf); if (pf->aux_idx >= 0) ida_free(&ice_aux_ida, pf->aux_idx); + ice_devlink_unregister_params(pf); set_bit(ICE_DOWN, pf->state); mutex_destroy(&(&pf->hw)->fdir_fltr_lock); @@ -5351,11 +5358,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; - enum ice_status status; u8 old_mac[ETH_ALEN]; u8 flags = 0; - int err = 0; u8 *mac; + int err; mac = (u8 *)addr->sa_data; @@ -5387,22 +5393,22 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) netif_addr_unlock_bh(netdev); /* Clean up old MAC filter. Not an error if old filter doesn't exist */ - status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); - if (status && status != ICE_ERR_DOES_NOT_EXIST) { + err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); + if (err && err != -ENOENT) { err = -EADDRNOTAVAIL; goto err_update_filters; } /* Add filter for new MAC. If filter exists, return success */ - status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) + err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); + if (err == -EEXIST) /* Although this MAC filter is already present in hardware it's * possible in some cases (e.g. bonding) that dev_addr was * modified outside of the driver and needs to be restored back * to this value. */ netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); - else if (status) + else if (err) /* error if the new filter addition failed */ err = -EADDRNOTAVAIL; @@ -5421,10 +5427,10 @@ err_update_filters: /* write new MAC address to the firmware */ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; - status = ice_aq_manage_mac_write(hw, mac, flags, NULL); - if (status) { - netdev_err(netdev, "can't set MAC %pM. 
write to firmware failed error %s\n", - mac, ice_stat_str(status)); + err = ice_aq_manage_mac_write(hw, mac, flags, NULL); + if (err) { + netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", + mac, err); } return 0; } @@ -5466,8 +5472,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - enum ice_status status; u16 q_handle; + int status; u8 tc; /* Validate maxrate requested is within permitted range */ @@ -5487,13 +5493,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) else status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, q_handle, ICE_MAX_BW, maxrate * 1000); - if (status) { - netdev_err(netdev, "Unable to set Tx max rate, error %s\n", - ice_stat_str(status)); - return -EIO; - } + if (status) + netdev_err(netdev, "Unable to set Tx max rate, error %d\n", + status); - return 0; + return status; } /** @@ -5863,8 +5867,13 @@ static int ice_up_complete(struct ice_vsi *vsi) ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); + if (!ice_is_e810(&pf->hw)) + ice_ptp_link_change(pf, pf->hw.pf_id, true); } + /* clear this now, and the first stats read will be used as baseline */ + vsi->stat_offsets_loaded = false; + ice_service_task_schedule(pf); return 0; @@ -5911,14 +5920,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st /** * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters * @vsi: the VSI to be updated + * @vsi_stats: the stats struct to be updated * @rings: rings to work on * @count: number of rings */ static void -ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, - u16 count) +ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, + struct rtnl_link_stats64 *vsi_stats, + struct ice_tx_ring **rings, u16 count) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; u16 i; for (i = 0; i < count; i++) { @@ -5942,15 +5952,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings, */ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) { - struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; + struct rtnl_link_stats64 *vsi_stats; u64 pkts, bytes; int i; - /* reset netdev stats */ - vsi_stats->tx_packets = 0; - vsi_stats->tx_bytes = 0; - vsi_stats->rx_packets = 0; - vsi_stats->rx_bytes = 0; + vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); + if (!vsi_stats) + return; /* reset non-netdev (extended) stats */ vsi->tx_restart = 0; @@ -5962,7 +5970,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) rcu_read_lock(); /* update Tx rings counters */ - ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, + vsi->num_txq); /* update Rx rings counters */ ice_for_each_rxq(vsi, i) { @@ -5977,10 +5986,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) /* update XDP Tx rings counters */ if (ice_is_xdp_ena_vsi(vsi)) - ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, + ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, vsi->num_xdp_txq); rcu_read_unlock(); + + vsi->net_stats.tx_packets = vsi_stats->tx_packets; + vsi->net_stats.tx_bytes = vsi_stats->tx_bytes; + vsi->net_stats.rx_packets = vsi_stats->rx_packets; + vsi->net_stats.rx_bytes = vsi_stats->rx_bytes; + + kfree(vsi_stats); } /** @@ -6243,15 +6259,18 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) /** * 
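ice_update_vsi_ring_stats() above stops zeroing the live vsi->net_stats in place; it aggregates into a heap-allocated scratch struct and copies the four counters over only once the walk is done, so a concurrent reader never sees them mid-reset. A simplified standalone sketch of that snapshot-then-publish shape; calloc stands in for kzalloc(..., GFP_ATOMIC), and a single ring array stands in for the separate Tx/Rx/XDP walks:

#include <stdlib.h>

struct stats64 {
	unsigned long long tx_packets, tx_bytes, rx_packets, rx_bytes;
};

static void update_ring_stats(struct stats64 *published,
			      const struct stats64 *rings, unsigned int n)
{
	struct stats64 *snap = calloc(1, sizeof(*snap));
	unsigned int i;

	if (!snap)
		return;	/* the driver also bails silently on allocation failure */

	for (i = 0; i < n; i++) {
		snap->tx_packets += rings[i].tx_packets;
		snap->tx_bytes   += rings[i].tx_bytes;
		snap->rx_packets += rings[i].rx_packets;
		snap->rx_bytes   += rings[i].rx_bytes;
	}

	*published = *snap;	/* publish once, fully formed */
	free(snap);
}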
ice_down - Shutdown the connection * @vsi: The VSI being stopped + * + * Caller of this function is expected to set the vsi->state ICE_DOWN bit */ int ice_down(struct ice_vsi *vsi) { int i, tx_err, rx_err, link_err = 0; - /* Caller of this function is expected to set the - * vsi->state ICE_DOWN bit - */ + WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); + if (vsi->netdev && vsi->type == ICE_VSI_PF) { + if (!ice_is_e810(&vsi->back->hw)) + ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { @@ -6517,7 +6536,6 @@ static void ice_vsi_release_all(struct ice_pf *pf) static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; int i, err; ice_for_each_vsi(pf, i) { @@ -6535,12 +6553,11 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) } /* replay filters for the VSI */ - status = ice_replay_vsi(&pf->hw, vsi->idx); - if (status) { - dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", - ice_stat_str(status), vsi->idx, - ice_vsi_type_str(type)); - return -EIO; + err = ice_replay_vsi(&pf->hw, vsi->idx); + if (err) { + dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", + err, vsi->idx, ice_vsi_type_str(type)); + return err; } /* Re-map HW VSI number, using VSI handle that has been @@ -6603,7 +6620,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - enum ice_status ret; int err; if (test_bit(ICE_DOWN, pf->state)) @@ -6611,10 +6627,17 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); - ret = ice_init_all_ctrlq(hw); - if (ret) { - dev_err(dev, "control queues init failed %s\n", - ice_stat_str(ret)); + if (reset_type == ICE_RESET_EMPR) { + /* If an EMP reset has occurred, any previously pending flash + * update will have completed. We no longer know whether or + * not the NVM update EMP reset is restricted. 
+ */ + pf->fw_emp_reset_disabled = false; + } + + err = ice_init_all_ctrlq(hw); + if (err) { + dev_err(dev, "control queues init failed %d\n", err); goto err_init_ctrlq; } @@ -6628,10 +6651,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_load_pkg(NULL, pf); } - ret = ice_clear_pf_cfg(hw); - if (ret) { - dev_err(dev, "clear PF configuration failed %s\n", - ice_stat_str(ret)); + err = ice_clear_pf_cfg(hw); + if (err) { + dev_err(dev, "clear PF configuration failed %d\n", err); goto err_init_ctrlq; } @@ -6643,21 +6665,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_clear_pxe_mode(hw); - ret = ice_init_nvm(hw); - if (ret) { - dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); + err = ice_init_nvm(hw); + if (err) { + dev_err(dev, "ice_init_nvm failed %d\n", err); goto err_init_ctrlq; } - ret = ice_get_caps(hw); - if (ret) { - dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); + err = ice_get_caps(hw); + if (err) { + dev_err(dev, "ice_get_caps failed %d\n", err); goto err_init_ctrlq; } - ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); - if (ret) { - dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); + err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); + if (err) { + dev_err(dev, "set_mac_cfg failed %d\n", err); goto err_init_ctrlq; } @@ -6695,7 +6717,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) * fail. */ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_init(pf); + ice_ptp_reset(pf); /* rebuild PF VSI */ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); @@ -6704,6 +6726,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } + /* configure PTP timestamping after VSI rebuild */ + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_cfg_timestamp(pf, false); + err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL); if (err) { dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err); @@ -6740,10 +6766,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_update_pf_netdev_link(pf); /* tell the firmware we are up */ - ret = ice_send_version(pf); - if (ret) { - dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", - ice_stat_str(ret)); + err = ice_send_version(pf); + if (err) { + dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", + err); goto err_vsi_rebuild; } @@ -6924,78 +6950,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err) } /** - * ice_stat_str - convert status err code to a string - * @stat_err: the status error code to convert - */ -const char *ice_stat_str(enum ice_status stat_err) -{ - switch (stat_err) { - case ICE_SUCCESS: - return "OK"; - case ICE_ERR_PARAM: - return "ICE_ERR_PARAM"; - case ICE_ERR_NOT_IMPL: - return "ICE_ERR_NOT_IMPL"; - case ICE_ERR_NOT_READY: - return "ICE_ERR_NOT_READY"; - case ICE_ERR_NOT_SUPPORTED: - return "ICE_ERR_NOT_SUPPORTED"; - case ICE_ERR_BAD_PTR: - return "ICE_ERR_BAD_PTR"; - case ICE_ERR_INVAL_SIZE: - return "ICE_ERR_INVAL_SIZE"; - case ICE_ERR_DEVICE_NOT_SUPPORTED: - return "ICE_ERR_DEVICE_NOT_SUPPORTED"; - case ICE_ERR_RESET_FAILED: - return "ICE_ERR_RESET_FAILED"; - case ICE_ERR_FW_API_VER: - return "ICE_ERR_FW_API_VER"; - case ICE_ERR_NO_MEMORY: - return "ICE_ERR_NO_MEMORY"; - case ICE_ERR_CFG: - return "ICE_ERR_CFG"; - case ICE_ERR_OUT_OF_RANGE: - return "ICE_ERR_OUT_OF_RANGE"; - case ICE_ERR_ALREADY_EXISTS: - return "ICE_ERR_ALREADY_EXISTS"; - 
case ICE_ERR_NVM: - return "ICE_ERR_NVM"; - case ICE_ERR_NVM_CHECKSUM: - return "ICE_ERR_NVM_CHECKSUM"; - case ICE_ERR_BUF_TOO_SHORT: - return "ICE_ERR_BUF_TOO_SHORT"; - case ICE_ERR_NVM_BLANK_MODE: - return "ICE_ERR_NVM_BLANK_MODE"; - case ICE_ERR_IN_USE: - return "ICE_ERR_IN_USE"; - case ICE_ERR_MAX_LIMIT: - return "ICE_ERR_MAX_LIMIT"; - case ICE_ERR_RESET_ONGOING: - return "ICE_ERR_RESET_ONGOING"; - case ICE_ERR_HW_TABLE: - return "ICE_ERR_HW_TABLE"; - case ICE_ERR_DOES_NOT_EXIST: - return "ICE_ERR_DOES_NOT_EXIST"; - case ICE_ERR_FW_DDP_MISMATCH: - return "ICE_ERR_FW_DDP_MISMATCH"; - case ICE_ERR_AQ_ERROR: - return "ICE_ERR_AQ_ERROR"; - case ICE_ERR_AQ_TIMEOUT: - return "ICE_ERR_AQ_TIMEOUT"; - case ICE_ERR_AQ_FULL: - return "ICE_ERR_AQ_FULL"; - case ICE_ERR_AQ_NO_WORK: - return "ICE_ERR_AQ_NO_WORK"; - case ICE_ERR_AQ_EMPTY: - return "ICE_ERR_AQ_EMPTY"; - case ICE_ERR_AQ_FW_CRITICAL: - return "ICE_ERR_AQ_FW_CRITICAL"; - } - - return "ICE_ERR_UNKNOWN"; -} - -/** * ice_set_rss_lut - Set RSS LUT * @vsi: Pointer to VSI structure * @lut: Lookup table @@ -7007,7 +6961,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -7018,14 +6972,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.lut = lut; status = ice_aq_set_rss_lut(hw, &params); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7038,20 +6989,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7066,7 +7014,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) { struct ice_aq_get_set_rss_lut_params params = {}; struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!lut) return -EINVAL; @@ -7077,14 +7025,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) params.lut = lut; status = ice_aq_get_rss_lut(hw, &params); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7097,20 +7042,17 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) { struct ice_hw *hw = &vsi->back->hw; - enum ice_status status; + int status; if (!seed) return -EINVAL; status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys 
*)seed); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return -EIO; - } + if (status) + dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); - return 0; + return status; } /** @@ -7151,8 +7093,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) struct ice_aqc_vsi_props *vsi_props; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; vsi_props = &vsi->info; @@ -7170,12 +7111,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", - bmode, ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", + bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } /* Update sw flags for book keeping */ @@ -7207,7 +7146,6 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, struct ice_pf *pf = np->vsi->back; struct nlattr *attr, *br_spec; struct ice_hw *hw = &pf->hw; - enum ice_status status; struct ice_sw *pf_sw; int rem, v, err = 0; @@ -7241,14 +7179,14 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, /* Update the unicast switch filter rules for the corresponding * switch of the netdev */ - status = ice_update_sw_rule_bridge_mode(hw); - if (status) { - netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", - mode, ice_stat_str(status), + err = ice_update_sw_rule_bridge_mode(hw); + if (err) { + netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", + mode, err, ice_aq_str(hw->adminq.sq_last_status)); /* revert hw->evb_veb */ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); - return -EIO; + return err; } pf_sw->bridge_mode = mode; @@ -7526,6 +7464,67 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi, } /** + * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF + * @pf: ptr to PF device + * @vsi: ptr to VSI + */ +static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) +{ + struct device *dev = ice_pf_to_dev(pf); + bool added = false; + struct ice_hw *hw; + int flow; + + if (!(vsi->num_gfltr || vsi->num_bfltr)) + return -EINVAL; + + hw = &pf->hw; + for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { + struct ice_fd_hw_prof *prof; + int tun, status; + u64 entry_h; + + if (!(hw->fdir_prof && hw->fdir_prof[flow] && + hw->fdir_prof[flow]->cnt)) + continue; + + for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { + enum ice_flow_priority prio; + u64 prof_id; + + /* add this VSI to FDir profile for this flow */ + prio = ICE_FLOW_PRIO_NORMAL; + prof = hw->fdir_prof[flow]; + prof_id = flow + tun * ICE_FLTR_PTYPE_MAX; + status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, + prof->vsi_h[0], vsi->idx, + prio, prof->fdir_seg[tun], + &entry_h); + if (status) { + dev_err(dev, "channel VSI idx %d, not able to add to group %d\n", + vsi->idx, flow); + continue; + } + + prof->entry_h[prof->cnt][tun] = entry_h; + } + + /* store VSI for filter replay and 
delete */ + prof->vsi_h[prof->cnt] = vsi->idx; + prof->cnt++; + + added = true; + dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, + flow); + } + + if (!added) + dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); + + return 0; +} + +/** * ice_add_channel - add a channel by adding VSI * @pf: ptr to PF device * @sw_id: underlying HW switching element ID @@ -7549,6 +7548,8 @@ static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) return -EINVAL; } + ice_add_vsi_to_fdir(pf, vsi); + ch->sw_id = sw_id; ch->vsi_num = vsi->vsi_num; ch->info.mapping_flags = vsi->info.mapping_flags; @@ -7849,6 +7850,15 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) if (rem_fltr) ice_rem_all_chnl_fltrs(pf); + /* remove ntuple filters since queue configuration is being changed */ + if (vsi->netdev->features & NETIF_F_NTUPLE) { + struct ice_hw *hw = &pf->hw; + + mutex_lock(&hw->fdir_fltr_lock); + ice_fdir_del_all_fltrs(vsi); + mutex_unlock(&hw->fdir_fltr_lock); + } + /* perform cleanup for channels if they exist */ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { struct ice_vsi *ch_vsi; @@ -7879,6 +7889,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) } } + /* Release FD resources for the channel VSI */ + ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); + /* clear the VSI from scheduler tree */ ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); @@ -8423,7 +8436,6 @@ int ice_open_internal(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; struct ice_port_info *pi; - enum ice_status status; int err; if (test_bit(ICE_NEEDS_RESTART, pf->state)) { @@ -8434,11 +8446,10 @@ int ice_open_internal(struct net_device *netdev) netif_carrier_off(netdev); pi = vsi->port_info; - status = ice_update_link_info(pi); - if (status) { - netdev_err(netdev, "Failed to get link info, error %s\n", - ice_stat_str(status)); - return -EIO; + err = ice_update_link_info(pi); + if (err) { + netdev_err(netdev, "Failed to get link info, error %d\n", err); + return err; } ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index fee37a5844cf..4eb0599714f4 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Intel Corporation. */ +#include <linux/vmalloc.h> + #include "ice_common.h" /** @@ -16,7 +18,7 @@ * * Read the NVM using the admin queue commands (0x0701) */ -static enum ice_status +static int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, bool read_shadow_ram, struct ice_sq_cd *cd) @@ -27,7 +29,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, cmd = &desc.params.nvm; if (offset > ICE_AQC_NVM_MAX_OFFSET) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); @@ -60,21 +62,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. 
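/*
 * Hedged caller sketch for the in/out @length semantics noted above: on
 * failure, *length reflects how many bytes landed in the buffer before the
 * error. The helper below is hypothetical and assumes a valid hw pointer.
 */
static int ice_sketch_dump_flash_prefix(struct ice_hw *hw, u8 *buf, u32 buf_len)
{
	u32 read_len = buf_len;	/* in: bytes requested; out: bytes read */
	int err;

	err = ice_read_flat_nvm(hw, 0, &read_len, buf, false);
	if (err)
		ice_debug(hw, ICE_DBG_NVM, "flat read stopped after %u of %u bytes, err %d\n",
			  read_len, buf_len, err);

	return err;
}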
*/ -enum ice_status +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram) { - enum ice_status status; u32 inlen = *length; u32 bytes_read = 0; bool last_cmd; + int status; *length = 0; /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n"); - return ICE_ERR_PARAM; + return -EINVAL; } do { @@ -119,7 +121,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, * * Update the NVM using the admin queue commands (0x0703) */ -enum ice_status +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd) @@ -131,7 +133,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); @@ -158,8 +160,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, * * Erase the NVM sector using the admin queue commands (0x0702) */ -enum ice_status -ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; @@ -184,12 +185,11 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) * * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. */ -static enum ice_status -ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) +static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) { u32 bytes = sizeof(u16); - enum ice_status status; __le16 data_local; + int status; /* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and * Shadow RAM sector restrictions necessary when reading from the NVM. @@ -210,8 +210,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) * * This function will request NVM ownership. */ -enum ice_status -ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { if (hw->flash.blank_nvm_mode) return 0; @@ -318,18 +317,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban * hw->flash.banks data being setup by ice_determine_active_flash_banks() * during initialization. */ -static enum ice_status +static int ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, u32 offset, u8 *data, u32 length) { - enum ice_status status; + int status; u32 start; start = ice_get_flash_bank_offset(hw, bank, module); if (!start) { ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", module); - return ICE_ERR_PARAM; + return -EINVAL; } status = ice_acquire_nvm(hw, ICE_RES_READ); @@ -353,11 +352,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, * Read the specified word from the active NVM module. This includes the CSS * header at the start of the NVM module. 
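/*
 * Standalone sketch of the chunking idea behind ice_read_flat_nvm(): split a
 * large read so no single request crosses a sector boundary. The 4096-byte
 * segment size and the read_fn callback are illustrative assumptions; the
 * driver additionally caps each chunk at the AdminQ buffer limit.
 */
#include <stdint.h>

typedef int (*flash_read_fn)(uint32_t offset, uint32_t len, uint8_t *dst);

static int read_flat(flash_read_fn read_fn, uint32_t offset, uint32_t len,
		     uint8_t *dst)
{
	uint32_t done = 0;

	while (done < len) {
		/* bytes left in the current 4KB sector */
		uint32_t room = 4096 - (offset % 4096);
		uint32_t chunk = len - done < room ? len - done : room;
		int err = read_fn(offset, chunk, dst + done);

		if (err)
			return err;	/* dst holds 'done' valid bytes */
		offset += chunk;
		done += chunk;
	}
	return 0;
}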
*/ -static enum ice_status +static int ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -377,7 +376,7 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. */ -static enum ice_status +static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); @@ -392,11 +391,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u * * Read a word from the specified netlist bank. */ -static enum ice_status +static int ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - enum ice_status status; __le16 data_local; + int status; status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), (__force u8 *)&data_local, sizeof(u16)); @@ -414,9 +413,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. */ -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { @@ -438,13 +437,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. */ -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - enum ice_status status; u16 pfa_len, pfa_ptr; u16 next_tlv; + int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status) { @@ -482,7 +481,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, *module_tlv_len = tlv_len; return 0; } - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Check next TLV, i.e. current TLV pointer + length + 2 words * (for current TLV's type and length) @@ -490,7 +489,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, next_tlv = next_tlv + tlv_len + 2; } /* Module does not exist */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } /** @@ -501,12 +500,11 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, * * Reads the part number string from the NVM. 
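/*
 * Standalone sketch of the TLV walk ice_get_pfa_module_tlv() performs above:
 * each entry is a type word and a length word followed by 'length' data
 * words, so the cursor advances by length + 2. A plain array stands in for
 * Shadow RAM reads; the simplified bounds are assumptions of this sketch.
 */
#include <stddef.h>
#include <stdint.h>

/* returns the word index of the matching TLV's type word, or -1 */
static int find_pfa_tlv(const uint16_t *pfa, size_t pfa_words, uint16_t wanted)
{
	size_t next = 0;

	while (next + 2 <= pfa_words) {
		uint16_t type = pfa[next];
		uint16_t len = pfa[next + 1];

		if (type == wanted)
			return (int)next;
		/* skip the type and length words plus the data words */
		next += (size_t)len + 2;
	}
	return -1;	/* module does not exist */
}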
*/ -enum ice_status -ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_tlv, pba_tlv_len; - enum ice_status status; u16 pba_word, pba_size; + int status; u16 i; status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, @@ -525,7 +523,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) if (pba_tlv_len < pba_size) { ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); - return ICE_ERR_INVAL_SIZE; + return -EINVAL; } /* Subtract one to get PBA word count (PBA Size word is included in @@ -534,7 +532,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); - return ICE_ERR_PARAM; + return -EINVAL; } for (i = 0; i < pba_size; i++) { @@ -561,11 +559,11 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) * Read the NVM EETRACK ID and map version of the main NVM image bank, filling * in the NVM info structure. */ -static enum ice_status +static int ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm) { u16 eetrack_lo, eetrack_hi, ver; - enum ice_status status; + int status; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { @@ -601,7 +599,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv * inactive NVM bank. Used to access version data for a pending update that * has not yet been activated. */ -enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) { return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm); } @@ -615,49 +613,73 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info * Searches through the Option ROM flash contents to locate the CIVD data for * the image. */ -static enum ice_status +static int ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_civd_info *civd) { - struct ice_orom_civd_info tmp; - enum ice_status status; + u8 *orom_data; + int status; u32 offset; /* The CIVD section is located in the Option ROM aligned to 512 bytes. * The first 4 bytes must contain the ASCII characters "$CIV". * A simple modulo 256 sum of all of the bytes of the structure must * equal 0. + * + * The exact location is unknown and varies between images but is + * usually somewhere in the middle of the bank. We need to scan the + * Option ROM bank to locate it. + * + * It's significantly faster to read the entire Option ROM up front + * using the maximum page size, than to read each possible location + * with a separate firmware command. 
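/*
 * Standalone sketch of the validity rule described above: a CIVD candidate
 * is accepted when it starts with "$CIV" and the modulo-256 sum of all of
 * its bytes is zero. Buffer-based illustration; the real structure layout is
 * struct ice_orom_civd_info.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int civd_candidate_valid(const uint8_t *blk, size_t blk_len)
{
	uint8_t sum = 0;
	size_t i;

	if (blk_len < 4 || memcmp(blk, "$CIV", 4) != 0)
		return 0;	/* signature mismatch */
	for (i = 0; i < blk_len; i++)
		sum += blk[i];	/* u8 arithmetic gives the mod-256 sum */
	return sum == 0;
}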
*/ + orom_data = vzalloc(hw->flash.banks.orom_size); + if (!orom_data) + return -ENOMEM; + + status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0, + orom_data, hw->flash.banks.orom_size); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n"); + vfree(orom_data); + return status; + } + + /* Scan the memory buffer to locate the CIVD data section */ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) { + struct ice_orom_civd_info *tmp; u8 sum = 0, i; - status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, - offset, (u8 *)&tmp, sizeof(tmp)); - if (status) { - ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n"); - return status; - } + tmp = (struct ice_orom_civd_info *)&orom_data[offset]; /* Skip forward until we find a matching signature */ - if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0) + if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0) continue; + ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n", + offset); + /* Verify that the simple checksum is zero */ - for (i = 0; i < sizeof(tmp); i++) + for (i = 0; i < sizeof(*tmp); i++) /* cppcheck-suppress objectIndex */ - sum += ((u8 *)&tmp)[i]; + sum += ((u8 *)tmp)[i]; if (sum) { ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", sum); - return ICE_ERR_NVM; + goto err_invalid_checksum; } - *civd = tmp; + *civd = *tmp; + vfree(orom_data); return 0; } - return ICE_ERR_NVM; + ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n"); + +err_invalid_checksum: + vfree(orom_data); + return -EIO; } /** @@ -669,12 +691,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, * Read Option ROM version and security revision from the Option ROM flash * section. */ -static enum ice_status +static int ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom) { struct ice_orom_civd_info civd; - enum ice_status status; u32 combo_ver; + int status; status = ice_get_orom_civd_data(hw, bank, &civd); if (status) { @@ -700,7 +722,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o * section of flash. Used to access version data for a pending update that has * not yet been activated. */ -enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) { return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom); } @@ -715,13 +737,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf * Topology section to find the Netlist ID block and extract the relevant * information into the netlist version structure. 
*/ -static enum ice_status +static int ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_netlist_info *netlist) { u16 module_id, length, node_count, i; - enum ice_status status; u16 *id_blk; + int status; status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id); if (status) @@ -730,7 +752,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); @@ -741,7 +763,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, if (length < ICE_NETLIST_ID_BLK_SIZE) { ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ICE_NETLIST_ID_BLK_SIZE, length); - return ICE_ERR_NVM; + return -EIO; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); @@ -751,7 +773,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL); if (!id_blk) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Read out the entire Netlist ID Block at once. */ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, @@ -791,7 +813,7 @@ exit_error: * extract version data of a pending flash update in order to display the * version data. */ -enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) { return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist); } @@ -804,10 +826,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli * the actual size is smaller. Use bisection to determine the accessible size * of flash memory. */ -static enum ice_status ice_discover_flash_size(struct ice_hw *hw) +static int ice_discover_flash_size(struct ice_hw *hw) { u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -819,7 +841,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) u8 data; status = ice_read_flat_nvm(hw, offset, &len, &data, false); - if (status == ICE_ERR_AQ_ERROR && + if (status == -EIO && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); @@ -859,10 +881,9 @@ err_read_flat_nvm: * sector size by using the highest bit. The reported pointer value will be in * bytes, intended for flat NVM reads. */ -static enum ice_status -ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) +static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -891,10 +912,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) * Each area size word is specified in 4KB sector units. This function reports * the size in bytes, intended for flat NVM reads. 
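/*
 * Standalone sketch of the bisection ice_discover_flash_size() uses above:
 * probe one byte at the midpoint, raise the lower bound on success, lower
 * the upper bound on an out-of-range error, and converge on the accessible
 * size. The probe callback is an illustrative stand-in for the firmware
 * read; the driver keys specifically on -EIO with ICE_AQ_RC_EINVAL.
 */
#include <stdint.h>

/* returns 0 if one byte at 'offset' is readable, nonzero otherwise */
typedef int (*probe_fn)(uint32_t offset);

static uint32_t discover_flash_size(probe_fn probe, uint32_t max_size)
{
	uint32_t min_size = 0;

	while ((max_size - min_size) > 1) {
		uint32_t offset = (max_size + min_size) / 2;

		if (probe(offset) == 0)
			min_size = offset;	/* readable: size is larger */
		else
			max_size = offset;	/* out of range: shrink bound */
	}
	return max_size;
}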
*/ -static enum ice_status -ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) +static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) { - enum ice_status status; + int status; u16 value; status = ice_read_sr_word(hw, offset, &value); @@ -917,12 +937,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) * structure for later use in order to calculate the correct offset to read * from the active module. */ -static enum ice_status -ice_determine_active_flash_banks(struct ice_hw *hw) +static int ice_determine_active_flash_banks(struct ice_hw *hw) { struct ice_bank_info *banks = &hw->flash.banks; - enum ice_status status; u16 ctrl_word; + int status; status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word); if (status) { @@ -933,7 +952,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw) /* Check that the control word indicates validity */ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); - return ICE_ERR_CFG; + return -EIO; } if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) @@ -997,12 +1016,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw) * This function reads and populates NVM settings such as Shadow RAM size, * max_timeout, and blank_nvm_mode */ -enum ice_status ice_init_nvm(struct ice_hw *hw) +int ice_init_nvm(struct ice_hw *hw) { struct ice_flash_info *flash = &hw->flash; - enum ice_status status; u32 fla, gens_stat; u8 sr_size; + int status; /* The SR size is stored regardless of the NVM programming mode * as the blank mode may be used in the factory line. @@ -1021,7 +1040,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) /* Blank programming mode */ flash->blank_nvm_mode = true; ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); - return ICE_ERR_NVM_BLANK_MODE; + return -EIO; } status = ice_discover_flash_size(hw); @@ -1060,11 +1079,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) * * Verify NVM PFA checksum validity (0x0706) */ -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) +int ice_nvm_validate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) @@ -1080,7 +1099,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) if (!status) if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) - status = ICE_ERR_NVM_CHECKSUM; + status = -EIO; return status; } @@ -1088,22 +1107,35 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) /** * ice_nvm_write_activate * @hw: pointer to the HW struct - * @cmd_flags: NVM activate admin command bits (banks to be validated) + * @cmd_flags: flags for write activate command + * @response_flags: response indicators from firmware * * Update the control word with the required banks' validity bits * and dumps the Shadow RAM to flash (0x0707) + * + * cmd_flags controls which banks to activate, and the preservation level to + * use when activating the NVM bank. + * + * On successful return of the firmware command, the response_flags variable + * is updated with the flags reported by firmware indicating certain status, + * such as whether EMP reset is enabled. 
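/*
 * Illustrative caller sketch for the new response_flags out-parameter: the
 * caller can cache whether firmware will permit a later EMP reset. The
 * ICE_AQC_NVM_EMPR_ENA bit name is an assumption of this sketch, not a
 * value confirmed by the surrounding patch.
 */
static int ice_sketch_activate_and_cache(struct ice_pf *pf, u8 cmd_flags)
{
	u8 response_flags = 0;
	int err;

	err = ice_nvm_write_activate(&pf->hw, cmd_flags, &response_flags);
	if (err)
		return err;

	/* remember whether firmware restricted the EMP reset */
	pf->fw_emp_reset_disabled = !(response_flags & ICE_AQC_NVM_EMPR_ENA);
	return 0;
}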
*/ -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) +int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags) { struct ice_aqc_nvm *cmd; struct ice_aq_desc desc; + int err; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); cmd->cmd_flags = cmd_flags; - return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!err && response_flags) + *response_flags = cmd->cmd_flags; + + return err; } /** @@ -1113,7 +1145,7 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) * Update empr (0x0709). This command allows SW to * request an EMPR to activate new FW. */ -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) +int ice_aq_nvm_update_empr(struct ice_hw *hw) { struct ice_aq_desc desc; @@ -1136,7 +1168,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) * as part of the NVM update as the first cmd in the flow. */ -enum ice_status +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd) { @@ -1144,7 +1176,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, struct ice_aq_desc desc; if (length != 0 && !data) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pkg_data; @@ -1173,17 +1205,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, * the TransferFlag is set to End or StartAndEnd. */ -enum ice_status +int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd) { struct ice_aqc_nvm_pass_comp_tbl *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; if (!data || !comp_response || !comp_response_code) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.pass_comp_tbl; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index c6f05f43d593..856d1ad4398b 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -12,38 +12,34 @@ struct ice_orom_civd_info { __le16 combo_name[32]; /* Unicode string representing the Combo Image version */ } __packed; -enum ice_status -ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); +int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_nvm(struct ice_hw *hw); -enum ice_status +int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); -enum ice_status +int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type); -enum ice_status -ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); -enum ice_status -ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); -enum ice_status +int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom); +int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm); +int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist); -enum ice_status -ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); -enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); -enum ice_status +int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); +int ice_init_nvm(struct ice_hw *hw); +int ice_read_sr_word(struct ice_hw *hw, u16 
offset, u16 *data); +int ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd); -enum ice_status +int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); -enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw); -enum ice_status +int ice_nvm_validate_checksum(struct ice_hw *hw); +int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags); +int ice_aq_nvm_update_empr(struct ice_hw *hw); +int ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, u16 length, struct ice_sq_cd *cd); -enum ice_status +int ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, u8 transfer_flag, u8 *comp_response, u8 *comp_response_code, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index bf7247c6f58e..ae291d442539 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -6,6 +6,8 @@ #define E810_OUT_PROP_DELAY_NS 1 +#define UNKNOWN_INCVAL_E822 0x100000000ULL + static const struct ptp_pin_desc ice_pin_desc_e810t[] = { /* name idx func chan */ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, @@ -281,6 +283,8 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) else val &= ~PFINT_OICR_TSYN_TX_M; wr32(&pf->hw, PFINT_OICR_ENA, val); + + pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; } /** @@ -303,6 +307,9 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) continue; vsi->rx_rings[i]->ptp_rx = on; } + + pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; } /** @@ -313,18 +320,10 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) * This function will configure timestamping during PTP initialization * and deinitialization */ -static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) +void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { ice_set_tx_tstamp(pf, ena); ice_set_rx_tstamp(pf, ena); - - if (ena) { - pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; - pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; - } else { - pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; - pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; - } } /** @@ -682,6 +681,406 @@ static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj) } /** + * ice_base_incval - Get base timer increment value + * @pf: Board private structure + * + * Look up the base timer increment value for this device. The base increment + * value is used to define the nominal clock tick rate. This increment value + * is programmed during device initialization. It is also used as the basis + * for calculating adjustments using scaled_ppm. 
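/*
 * Standalone sketch of the scaled_ppm adjustment that ice_ptp_adjfine()
 * applies to this base value further down: scaled_ppm carries a 16-bit
 * fractional field, so 1 ppm == 65536 and the increment delta is
 * incval * scaled_ppm / (1000000 * 2^16). The 128-bit intermediate is a
 * GCC/Clang extension used here to sidestep the overflow loop the driver
 * uses instead.
 */
#include <stdint.h>

static uint64_t incval_delta(uint64_t incval, uint64_t scaled_ppm)
{
	unsigned __int128 num = (unsigned __int128)incval * scaled_ppm;

	/* e.g. scaled_ppm = 65536 (1 ppm) yields roughly incval / 1000000 */
	return (uint64_t)(num / (1000000ULL << 16));
}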
+ */ +static u64 ice_base_incval(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u64 incval; + + if (ice_is_e810(hw)) + incval = ICE_PTP_NOMINAL_INCVAL_E810; + else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) + incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); + else + incval = UNKNOWN_INCVAL_E822; + + dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", + incval); + + return incval; +} + +/** + * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad + * @pf: The PF private data structure + * @quad: The quad (0-4) + */ +static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad) +{ + struct ice_hw *hw = &pf->hw; + + ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); + ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); +} + +/** + * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state + * @port: PTP port for which Tx FIFO is checked + */ +static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) +{ + int quad = port->port_num / ICE_PORTS_PER_QUAD; + int offs = port->port_num % ICE_PORTS_PER_QUAD; + struct ice_pf *pf; + struct ice_hw *hw; + u32 val, phy_sts; + int err; + + pf = ptp_port_to_pf(port); + hw = &pf->hw; + + if (port->tx_fifo_busy_cnt == FIFO_OK) + return 0; + + /* need to read FIFO state */ + if (offs == 0 || offs == 1) + err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, + &val); + else + err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, + &val); + + if (err) { + dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n", + port->port_num, err); + return err; + } + + if (offs & 0x1) + phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S; + else + phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S; + + if (phy_sts & FIFO_EMPTY) { + port->tx_fifo_busy_cnt = FIFO_OK; + return 0; + } + + port->tx_fifo_busy_cnt++; + + dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n", + port->tx_fifo_busy_cnt, port->port_num); + + if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) { + dev_dbg(ice_pf_to_dev(pf), + "Port %d Tx FIFO still not empty; resetting quad %d\n", + port->port_num, quad); + ice_ptp_reset_ts_memory_quad(pf, quad); + port->tx_fifo_busy_cnt = FIFO_OK; + return 0; + } + + return -EAGAIN; +} + +/** + * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid + * @port: the PTP port to check + * + * Checks whether the Tx offset for the PHY associated with this port is + * valid. Returns 0 if the offset is valid, and a non-zero error code if it is + * not. + */ +static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port) +{ + struct ice_pf *pf = ptp_port_to_pf(port); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + u32 val; + int err; + + err = ice_ptp_check_tx_fifo(port); + if (err) + return err; + + err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS, + &val); + if (err) { + dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n", + port->port_num, err); + return -EAGAIN; + } + + if (!(val & P_REG_TX_OV_STATUS_OV_M)) + return -EAGAIN; + + return 0; +} + +/** + * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid + * @port: the PTP port to check + * + * Checks whether the Rx offset for the PHY associated with this port is + * valid. Returns 0 if the offset is valid, and a non-zero error code if it is + * not. 
+ */ +static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port) +{ + struct ice_pf *pf = ptp_port_to_pf(port); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int err; + u32 val; + + err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS, + &val); + if (err) { + dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n", + port->port_num, err); + return err; + } + + if (!(val & P_REG_RX_OV_STATUS_OV_M)) + return -EAGAIN; + + return 0; +} + +/** + * ice_ptp_check_offset_valid - Check port offset valid bit + * @port: Port for which offset valid bit is checked + * + * Returns 0 if both Tx and Rx offset are valid, and -EAGAIN if one of the + * offset is not ready. + */ +static int ice_ptp_check_offset_valid(struct ice_ptp_port *port) +{ + int tx_err, rx_err; + + /* always check both Tx and Rx offset validity */ + tx_err = ice_ptp_check_tx_offset_valid(port); + rx_err = ice_ptp_check_rx_offset_valid(port); + + if (tx_err || rx_err) + return -EAGAIN; + + return 0; +} + +/** + * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets + * @work: Pointer to the kthread_work structure for this task + * + * Check whether both the Tx and Rx offsets are valid for enabling the vernier + * calibration. + * + * Once we have valid offsets from hardware, update the total Tx and Rx + * offsets, and exit bypass mode. This enables more precise timestamps using + * the extra data measured during the vernier calibration process. + */ +static void ice_ptp_wait_for_offset_valid(struct kthread_work *work) +{ + struct ice_ptp_port *port; + int err; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + + port = container_of(work, struct ice_ptp_port, ov_work.work); + pf = ptp_port_to_pf(port); + hw = &pf->hw; + dev = ice_pf_to_dev(pf); + + if (ice_ptp_check_offset_valid(port)) { + /* Offsets not ready yet, try again later */ + kthread_queue_delayed_work(pf->ptp.kworker, + &port->ov_work, + msecs_to_jiffies(100)); + return; + } + + /* Offsets are valid, so it is safe to exit bypass mode */ + err = ice_phy_exit_bypass_e822(hw, port->port_num); + if (err) { + dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n", + port->port_num, err); + return; + } +} + +/** + * ice_ptp_port_phy_stop - Stop timestamping for a PHY port + * @ptp_port: PTP port to stop + */ +static int +ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) +{ + struct ice_pf *pf = ptp_port_to_pf(ptp_port); + u8 port = ptp_port->port_num; + struct ice_hw *hw = &pf->hw; + int err; + + if (ice_is_e810(hw)) + return 0; + + mutex_lock(&ptp_port->ps_lock); + + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + + err = ice_stop_phy_timer_e822(hw, port, true); + if (err) + dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", + port, err); + + mutex_unlock(&ptp_port->ps_lock); + + return err; +} + +/** + * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping + * @ptp_port: PTP port for which the PHY start is set + * + * Start the PHY timestamping block, and initiate Vernier timestamping + * calibration. If timestamping cannot be calibrated (such as if link is down) + * then disable the timestamping block instead. 
+ */ +static int +ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) +{ + struct ice_pf *pf = ptp_port_to_pf(ptp_port); + u8 port = ptp_port->port_num; + struct ice_hw *hw = &pf->hw; + int err; + + if (ice_is_e810(hw)) + return 0; + + if (!ptp_port->link_up) + return ice_ptp_port_phy_stop(ptp_port); + + mutex_lock(&ptp_port->ps_lock); + + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + + /* temporarily disable Tx timestamps while calibrating PHY offset */ + ptp_port->tx.calibrating = true; + ptp_port->tx_fifo_busy_cnt = 0; + + /* Start the PHY timer in bypass mode */ + err = ice_start_phy_timer_e822(hw, port, true); + if (err) + goto out_unlock; + + /* Enable Tx timestamps right away */ + ptp_port->tx.calibrating = false; + + kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); + +out_unlock: + if (err) + dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", + port, err); + + mutex_unlock(&ptp_port->ps_lock); + + return err; +} + +/** + * ice_ptp_link_change - Set or clear port registers for timestamping + * @pf: Board private structure + * @port: Port for which the PHY start is set + * @linkup: Link is up or down + */ +int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +{ + struct ice_ptp_port *ptp_port; + + if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + return 0; + + if (port >= ICE_NUM_EXTERNAL_PORTS) + return -EINVAL; + + ptp_port = &pf->ptp.port; + if (ptp_port->port_num != port) + return -EINVAL; + + /* Update cached link err for this port immediately */ + ptp_port->link_up = linkup; + + if (!test_bit(ICE_FLAG_PTP, pf->flags)) + /* PTP is not setup */ + return -EAGAIN; + + return ice_ptp_port_phy_restart(ptp_port); +} + +/** + * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads + * @pf: The PF private data structure + */ +static void ice_ptp_reset_ts_memory(struct ice_pf *pf) +{ + int quad; + + quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD; + ice_ptp_reset_ts_memory_quad(pf, quad); +} + +/** + * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt + * @pf: PF private structure + * @ena: bool value to enable or disable interrupt + * @threshold: Minimum number of packets at which intr is triggered + * + * Utility function to enable or disable Tx timestamp interrupt and threshold + */ +static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) +{ + struct ice_hw *hw = &pf->hw; + int err = 0; + int quad; + u32 val; + + ice_ptp_reset_ts_memory(pf); + + for (quad = 0; quad < ICE_MAX_QUAD; quad++) { + err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + &val); + if (err) + break; + + if (ena) { + val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; + val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & + Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); + } else { + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + } + + err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, + val); + if (err) + break; + } + + if (err) + dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", + err); + return err; +} + +/** + * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block + * @pf: Board private structure + */ +static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) +{ + ice_ptp_port_phy_restart(&pf->ptp.port); +} + +/** * ice_ptp_adjfine - Adjust clock increment rate * @info: the driver's PTP info structure * @scaled_ppm: Parts per million with 16-bit fractional field @@ -698,14 +1097,14 @@ static int ice_ptp_adjfine(struct 
ptp_clock_info *info, long scaled_ppm) int neg_adj = 0; int err; - incval = ICE_PTP_NOMINAL_INCVAL_E810; + incval = ice_base_incval(pf); if (scaled_ppm < 0) { neg_adj = 1; scaled_ppm = -scaled_ppm; } - while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) { + while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) { /* handle overflow by scaling down the scaled_ppm and * the divisor, losing some precision */ @@ -905,7 +1304,10 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, start_time = div64_u64(current_time + NSEC_PER_SEC - 1, NSEC_PER_SEC) * NSEC_PER_SEC + phase; - start_time -= E810_OUT_PROP_DELAY_NS; + if (ice_is_e810(hw)) + start_time -= E810_OUT_PROP_DELAY_NS; + else + start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); /* 2. Write TARGET time */ wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); @@ -1088,6 +1490,12 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) struct ice_hw *hw = &pf->hw; int err; + /* For Vernier mode, we need to recalibrate after new settime + * Start with disabling timestamp block + */ + if (pf->ptp.port.link_up) + ice_ptp_port_phy_stop(&pf->ptp.port); + if (!ice_ptp_lock(hw)) { err = -EBUSY; goto exit; @@ -1104,6 +1512,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) /* Reenable periodic outputs */ ice_ptp_enable_all_clkout(pf); + + /* Recalibrate and re-enable timestamp block */ + if (pf->ptp.port.link_up) + ice_ptp_port_phy_restart(&pf->ptp.port); exit: if (err) { dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); @@ -1177,6 +1589,101 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) return 0; } +#ifdef CONFIG_ICE_HWTS +/** + * ice_ptp_get_syncdevicetime - Get the cross time stamp info + * @device: Current device time + * @system: System counter value read synchronously with device time + * @ctx: Context provided by timekeeping code + * + * Read device and system (ART) clock simultaneously and return the corrected + * clock values in ns. 
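/*
 * Userspace view (illustrative): a getcrosststamp callback like the one
 * below is what backs the PTP_SYS_OFFSET_PRECISE ioctl on the clock's
 * /dev/ptpN node. Minimal sketch; the device path is an assumption and
 * error handling is trimmed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off))
		return 1;
	printf("device %lld.%09u sys %lld.%09u\n",
	       (long long)off.device.sec, off.device.nsec,
	       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
	return 0;
}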
+ */ +static int +ice_ptp_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct ice_pf *pf = (struct ice_pf *)ctx; + struct ice_hw *hw = &pf->hw; + u32 hh_lock, hh_art_ctl; + int i; + + /* Get the HW lock */ + hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); + if (hh_lock & PFHH_SEM_BUSY_M) { + dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); + return -EFAULT; + } + + /* Start the ART and device clock sync sequence */ + hh_art_ctl = rd32(hw, GLHH_ART_CTL); + hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; + wr32(hw, GLHH_ART_CTL, hh_art_ctl); + +#define MAX_HH_LOCK_TRIES 100 + + for (i = 0; i < MAX_HH_LOCK_TRIES; i++) { + /* Wait for sync to complete */ + hh_art_ctl = rd32(hw, GLHH_ART_CTL); + if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { + udelay(1); + continue; + } else { + u32 hh_ts_lo, hh_ts_hi, tmr_idx; + u64 hh_ts; + + tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; + /* Read ART time */ + hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); + hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); + hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; + *system = convert_art_ns_to_tsc(hh_ts); + /* Read Device source clock time */ + hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); + hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); + hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; + *device = ns_to_ktime(hh_ts); + break; + } + } + /* Release HW lock */ + hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); + hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; + wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); + + if (i == MAX_HH_LOCK_TRIES) + return -ETIMEDOUT; + + return 0; +} + +/** + * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp + * @info: the driver's PTP info structure + * @cts: The memory to fill the cross timestamp info + * + * Capture a cross timestamp between the ART and the device PTP hardware + * clock. Fill the cross timestamp information and report it back to the + * caller. + * + * This is only valid for E822 devices which have support for generating the + * cross timestamp via PCIe PTM. + * + * In order to correctly correlate the ART timestamp back to the TSC time, the + * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. + */ +static int +ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) +{ + struct ice_pf *pf = ptp_info_to_pf(info); + + return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, + pf, NULL, cts); +} +#endif /* CONFIG_ICE_HWTS */ + /** * ice_ptp_get_ts_config - ioctl interface to read the timestamping config * @pf: Board private structure @@ -1205,10 +1712,6 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) static int ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) { - /* Reserved for future extensions. 
*/ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: ice_set_tx_tstamp(pf, false); @@ -1238,7 +1741,6 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: - config->rx_filter = HWTSTAMP_FILTER_ALL; ice_set_rx_tstamp(pf, true); break; default: @@ -1270,8 +1772,8 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) if (err) return err; - /* Save these settings for future reference */ - pf->ptp.tstamp_config = config; + /* Return the actual configuration set */ + config = pf->ptp.tstamp_config; return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; @@ -1403,6 +1905,26 @@ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info) } /** + * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support + * @pf: Board private structure + * @info: PTP info to fill + * + * Assign functions to the PTP capabilities structure for E822 devices. + * Functions which operate across all device families should be set directly + * in ice_ptp_set_caps. Only add functions here which are distinct for E822 + * devices. + */ +static void +ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info) +{ +#ifdef CONFIG_ICE_HWTS + if (boot_cpu_has(X86_FEATURE_ART) && + boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) + info->getcrosststamp = ice_ptp_getcrosststamp_e822; +#endif /* CONFIG_ICE_HWTS */ +} + +/** * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support * @pf: Board private structure * @info: PTP info to fill @@ -1441,7 +1963,10 @@ static void ice_ptp_set_caps(struct ice_pf *pf) info->gettimex64 = ice_ptp_gettimex64; info->settime64 = ice_ptp_settime64; - ice_ptp_set_funcs_e810(pf, info); + if (ice_is_e810(&pf->hw)) + ice_ptp_set_funcs_e810(pf, info); + else + ice_ptp_set_funcs_e822(pf, info); } /** @@ -1540,19 +2065,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work) if (err) continue; - /* Check if the timestamp is valid */ - if (!(raw_tstamp & ICE_PTP_TS_VALID)) + /* Check if the timestamp is invalid or stale */ + if (!(raw_tstamp & ICE_PTP_TS_VALID) || + raw_tstamp == tx->tstamps[idx].cached_tstamp) continue; - /* clear the timestamp register, so that it won't show valid - * again when re-used. - */ - ice_clear_phy_tstamp(hw, tx->quad, phy_idx); - /* The timestamp is valid, so we'll go ahead and clear this * index and then send the timestamp up to the stack. */ spin_lock(&tx->lock); + tx->tstamps[idx].cached_tstamp = raw_tstamp; clear_bit(idx, tx->in_use); skb = tx->tstamps[idx].skb; tx->tstamps[idx].skb = NULL; @@ -1591,7 +2113,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) u8 idx; /* Check if this tracker is initialized */ - if (!tx->init) + if (!tx->init || tx->calibrating) return -1; spin_lock(&tx->lock); @@ -1707,13 +2229,34 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) kfree(tx->tstamps); tx->tstamps = NULL; - kfree(tx->in_use); + bitmap_free(tx->in_use); tx->in_use = NULL; tx->len = 0; } /** + * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps + * @pf: Board private structure + * @tx: the Tx tracking structure to initialize + * @port: the port this structure tracks + * + * Initialize the Tx timestamp tracker for this port. For generic MAC devices, + * the timestamp block is shared for all ports in the same quad. 
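/*
 * Worked example of the quad partitioning described in this comment, using
 * the assignments shown just below. The ICE_PORTS_PER_QUAD and
 * INDEX_PER_PORT values of 4 and 16 are assumptions of this sketch: port 5
 * then lands in quad 1 with the 16-entry index range starting at 16.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int ports_per_quad = 4, index_per_port = 16;
	unsigned int port = 5;
	unsigned int quad = port / ports_per_quad;	  /* 1 */
	unsigned int quad_offset = quad * index_per_port; /* 16 */

	printf("port %u -> quad %u, indices [%u, %u]\n", port, quad,
	       quad_offset, quad_offset + index_per_port - 1);
	return 0;
}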
To avoid + * ports using the same timestamp index, logically break the block of + * registers into chunks based on the port number. + */ +static int +ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +{ + tx->quad = port / ICE_PORTS_PER_QUAD; + tx->quad_offset = tx->quad * INDEX_PER_PORT; + tx->len = INDEX_PER_PORT; + + return ice_ptp_alloc_tx_tracker(tx); +} + +/** * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize @@ -1784,6 +2327,130 @@ static void ice_ptp_periodic_work(struct kthread_work *work) } /** + * ice_ptp_reset - Initialize PTP hardware clock support after reset + * @pf: Board private structure + */ +void ice_ptp_reset(struct ice_pf *pf) +{ + struct ice_ptp *ptp = &pf->ptp; + struct ice_hw *hw = &pf->hw; + struct timespec64 ts; + int err, itr = 1; + u64 time_diff; + + if (test_bit(ICE_PFR_REQ, pf->state)) + goto pfr; + + if (!hw->func_caps.ts_func_info.src_tmr_owned) + goto reset_ts; + + err = ice_ptp_init_phc(hw); + if (err) + goto err; + + /* Acquire the global hardware lock */ + if (!ice_ptp_lock(hw)) { + err = -EBUSY; + goto err; + } + + /* Write the increment time value to PHY and LAN */ + err = ice_ptp_write_incval(hw, ice_base_incval(pf)); + if (err) { + ice_ptp_unlock(hw); + goto err; + } + + /* Write the initial Time value to PHY and LAN using the cached PHC + * time before the reset and time difference between stopping and + * starting the clock. + */ + if (ptp->cached_phc_time) { + time_diff = ktime_get_real_ns() - ptp->reset_time; + ts = ns_to_timespec64(ptp->cached_phc_time + time_diff); + } else { + ts = ktime_to_timespec64(ktime_get_real()); + } + err = ice_ptp_write_init(pf, &ts); + if (err) { + ice_ptp_unlock(hw); + goto err; + } + + /* Release the global hardware lock */ + ice_ptp_unlock(hw); + + if (!ice_is_e810(hw)) { + /* Enable quad interrupts */ + err = ice_ptp_tx_ena_intr(pf, true, itr); + if (err) + goto err; + } + +reset_ts: + /* Restart the PHY timestamping block */ + ice_ptp_reset_phy_timestamping(pf); + +pfr: + /* Init Tx structures */ + if (ice_is_e810(&pf->hw)) { + err = ice_ptp_init_tx_e810(pf, &ptp->port.tx); + } else { + kthread_init_delayed_work(&ptp->port.ov_work, + ice_ptp_wait_for_offset_valid); + err = ice_ptp_init_tx_e822(pf, &ptp->port.tx, + ptp->port.port_num); + } + if (err) + goto err; + + set_bit(ICE_FLAG_PTP, pf->flags); + + /* Start periodic work going */ + kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); + + dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); + return; + +err: + dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); +} + +/** + * ice_ptp_prepare_for_reset - Prepare PTP for reset + * @pf: Board private structure + */ +void ice_ptp_prepare_for_reset(struct ice_pf *pf) +{ + struct ice_ptp *ptp = &pf->ptp; + u8 src_tmr; + + clear_bit(ICE_FLAG_PTP, pf->flags); + + /* Disable timestamping for both Tx and Rx */ + ice_ptp_cfg_timestamp(pf, false); + + kthread_cancel_delayed_work_sync(&ptp->work); + kthread_cancel_work_sync(&ptp->extts_work); + + if (test_bit(ICE_PFR_REQ, pf->state)) + return; + + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); + + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + + src_tmr = ice_get_ptp_src_clock_index(&pf->hw); + + /* Disable source clock */ + wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); + + /* Acquire PHC and system timer to restore after reset */ + ptp->reset_time = ktime_get_real_ns(); +} + +/** * ice_ptp_init_owner - 
Initialize PTP_1588_CLOCK device * @pf: Board private structure * @@ -1793,27 +2460,16 @@ static void ice_ptp_periodic_work(struct kthread_work *work) */ static int ice_ptp_init_owner(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct timespec64 ts; - u8 src_idx; - int err; - - wr32(hw, GLTSYN_SYNC_DLAY, 0); + int err, itr = 1; - /* Clear some HW residue and enable source clock */ - src_idx = hw->func_caps.ts_func_info.tmr_index_owned; - - /* Enable source clocks */ - wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); - - /* Enable PHY time sync */ - err = ice_ptp_init_phy_e810(hw); - if (err) - goto err_exit; - - /* Clear event status indications for auxiliary pins */ - (void)rd32(hw, GLTSYN_STAT(src_idx)); + err = ice_ptp_init_phc(hw); + if (err) { + dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n", + err); + return err; + } /* Acquire the global hardware lock */ if (!ice_ptp_lock(hw)) { @@ -1822,7 +2478,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf) } /* Write the increment time value to PHY and LAN */ - err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810); + err = ice_ptp_write_incval(hw, ice_base_incval(pf)); if (err) { ice_ptp_unlock(hw); goto err_exit; @@ -1839,6 +2495,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Release the global hardware lock */ ice_ptp_unlock(hw); + if (!ice_is_e810(hw)) { + /* Enable quad interrupts */ + err = ice_ptp_tx_ena_intr(pf, true, itr); + if (err) + goto err_exit; + } + /* Ensure we have a clock device */ err = ice_ptp_create_clock(pf); if (err) @@ -1852,72 +2515,106 @@ static int ice_ptp_init_owner(struct ice_pf *pf) err_clk: pf->ptp.clock = NULL; err_exit: - dev_err(dev, "PTP failed to register clock, err %d\n", err); - return err; } /** - * ice_ptp_init - Initialize the PTP support after device probe or reset + * ice_ptp_init_work - Initialize PTP work threads * @pf: Board private structure + * @ptp: PF PTP structure + */ +static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) +{ + struct kthread_worker *kworker; + + /* Initialize work functions */ + kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); + kthread_init_work(&ptp->extts_work, ice_ptp_extts_work); + + /* Allocate a kworker for handling work required for the ports + * connected to the PTP hardware clock. + */ + kworker = kthread_create_worker(0, "ice-ptp-%s", + dev_name(ice_pf_to_dev(pf))); + if (IS_ERR(kworker)) + return PTR_ERR(kworker); + + ptp->kworker = kworker; + + /* Start periodic work going */ + kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); + + return 0; +} + +/** + * ice_ptp_init_port - Initialize PTP port structure + * @pf: Board private structure + * @ptp_port: PTP port structure + */ +static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) +{ + mutex_init(&ptp_port->ps_lock); + + if (ice_is_e810(&pf->hw)) + return ice_ptp_init_tx_e810(pf, &ptp_port->tx); + + kthread_init_delayed_work(&ptp_port->ov_work, + ice_ptp_wait_for_offset_valid); + return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num); +} + +/** + * ice_ptp_init - Initialize PTP hardware clock support + * @pf: Board private structure + * + * Set up the device for interacting with the PTP hardware clock for all + * functions, both the function that owns the clock hardware, and the + * functions connected to the clock hardware. * - * This function sets device up for PTP support. The first time it is run, it - * will create a clock device. 
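The ice_ptp_prepare_for_reset() and ice_ptp_reset() functions above are meant to be called as a pair around a hardware reset. A simplified sketch of the intended call ordering (the real call sites live in the ice PF rebuild path, which is outside this hunk; the function below is illustrative only):

static void example_rebuild_flow(struct ice_pf *pf)
{
	/* Before reset: stop PTP work items, disable timestamping, and
	 * snapshot the system time into ptp->reset_time.
	 */
	ice_ptp_prepare_for_reset(pf);

	/* ... hardware reset and rebuild happen here ... */

	/* After reset: re-init the PHC and restore a best-effort time of
	 * cached_phc_time + (ktime_get_real_ns() - reset_time), exactly
	 * as ice_ptp_reset() does above.
	 */
	ice_ptp_reset(pf);
}
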
It does not create a clock device if one - * already exists. It also reconfigures the device after a reset. + * The clock owner will allocate and register a ptp_clock with the + * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work + * items used for asynchronous work such as Tx timestamps and periodic work. */ void ice_ptp_init(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - struct kthread_worker *kworker; + struct ice_ptp *ptp = &pf->ptp; struct ice_hw *hw = &pf->hw; int err; - /* PTP is currently only supported on E810 devices */ - if (!ice_is_e810(hw)) - return; - - /* Check if this PF owns the source timer */ + /* If this function owns the clock hardware, it must allocate and + * configure the PTP clock device to represent it. + */ if (hw->func_caps.ts_func_info.src_tmr_owned) { err = ice_ptp_init_owner(pf); if (err) - return; + goto err; } - /* Disable timestamping for both Tx and Rx */ - ice_ptp_cfg_timestamp(pf, false); - - /* Initialize the PTP port Tx timestamp tracker */ - ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx); - - /* Initialize work functions */ - kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work); - kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work); + ptp->port.port_num = hw->pf_id; + err = ice_ptp_init_port(pf, &ptp->port); + if (err) + goto err; - /* Allocate a kworker for handling work required for the ports - * connected to the PTP hardware clock. - */ - kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev)); - if (IS_ERR(kworker)) { - err = PTR_ERR(kworker); - goto err_kworker; - } - pf->ptp.kworker = kworker; + /* Start the PHY timestamping block */ + ice_ptp_reset_phy_timestamping(pf); set_bit(ICE_FLAG_PTP, pf->flags); + err = ice_ptp_init_work(pf, ptp); + if (err) + goto err; - /* Start periodic work going */ - kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0); - - dev_info(dev, "PTP init successful\n"); + dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); return; -err_kworker: +err: /* If we registered a PTP clock, release it */ if (pf->ptp.clock) { - ptp_clock_unregister(pf->ptp.clock); + ptp_clock_unregister(ptp->clock); pf->ptp.clock = NULL; } - dev_err(dev, "PTP failed %d\n", err); + clear_bit(ICE_FLAG_PTP, pf->flags); + dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); } /** @@ -1941,6 +2638,8 @@ void ice_ptp_release(struct ice_pf *pf) kthread_cancel_delayed_work_sync(&pf->ptp.work); + ice_ptp_port_phy_stop(&pf->ptp.port); + mutex_destroy(&pf->ptp.port.ps_lock); if (pf->ptp.kworker) { kthread_destroy_worker(pf->ptp.kworker); pf->ptp.kworker = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index f71ad317d6c8..afd048d69959 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -55,15 +55,21 @@ struct ice_perout_channel { * struct ice_tx_tstamp - Tracking for a single Tx timestamp * @skb: pointer to the SKB for this timestamp request * @start: jiffies when the timestamp was first requested + * @cached_tstamp: last read timestamp * * This structure tracks a single timestamp request. The SKB pointer is * provided when initiating a request. The start time is used to ensure that * we discard old requests that were not fulfilled within a 2 second time * window. + * Timestamp values in the PHY are read only and do not get cleared except at + * hardware reset or when a new timestamp value is captured. 
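A condensed view of the staleness logic this field enables, mirroring the ice_ptp_tx_tstamp_work() hunk earlier in this patch (simplified sketch; locking and skb handling elided):

static bool example_tstamp_is_fresh(struct ice_tx_tstamp *entry, u64 raw)
{
	if (!(raw & ICE_PTP_TS_VALID))
		return false;		/* nothing captured yet */
	if (raw == entry->cached_tstamp)
		return false;		/* old value still latched in the PHY */

	entry->cached_tstamp = raw;	/* remember for the next pass */
	return true;
}
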
The cached_tstamp + * field is used to detect the case where a new timestamp has not yet been + * captured, ensuring that we avoid sending stale timestamp data to the stack. */ struct ice_tx_tstamp { struct sk_buff *skb; unsigned long start; + u64 cached_tstamp; }; /** @@ -76,6 +82,8 @@ struct ice_tx_tstamp { * @quad_offset: offset into timestamp block of the quad to get the real index * @len: length of the tstamps and in_use fields. * @init: if true, the tracker is initialized; + * @calibrating: if true, the PHY is calibrating the Tx offset. During this + * window, timestamps are temporarily disabled. */ struct ice_ptp_tx { struct kthread_work work; @@ -86,6 +94,7 @@ struct ice_ptp_tx { u8 quad_offset; u8 len; u8 init; + u8 calibrating; }; /* Quad and port information for initializing timestamp blocks */ @@ -95,15 +104,24 @@ struct ice_ptp_tx { /** * struct ice_ptp_port - data used to initialize an external port for PTP * - * This structure contains PTP data related to the external ports. Currently - * it is used for tracking the Tx timestamps of a port. In the future this - * structure will also hold information for the E822 port initialization - * logic. + * This structure contains data indicating whether a single external port is + * ready for PTP functionality. It is used to track the port initialization + * and determine when the port's PHY offset is valid. * * @tx: Tx timestamp tracking for this port + * @ov_work: delayed work task for tracking when PHY offset is valid + * @ps_lock: mutex used to protect the overall PTP PHY start procedure + * @link_up: indicates whether the link is up + * @tx_fifo_busy_cnt: number of times the Tx FIFO was busy + * @port_num: the port number this structure represents */ struct ice_ptp_port { struct ice_ptp_tx tx; + struct kthread_delayed_work ov_work; + struct mutex ps_lock; /* protects overall PTP PHY start procedure */ + bool link_up; + u8 tx_fifo_busy_cnt; + u8 port_num; }; #define GLTSYN_TGT_H_IDX_MAX 4 @@ -121,6 +139,7 @@ struct ice_ptp_port { * @info: structure defining PTP hardware capabilities * @clock: pointer to registered PTP clock device * @tstamp_config: hardware timestamping configuration + * @reset_time: kernel time after clock stop on reset */ struct ice_ptp { struct ice_ptp_port port; @@ -134,6 +153,7 @@ struct ice_ptp { struct ptp_clock_info info; struct ptp_clock *clock; struct hwtstamp_config tstamp_config; + u64 reset_time; }; #define __ptp_port_to_ptp(p) \ @@ -146,9 +166,15 @@ struct ice_ptp { #define ptp_info_to_pf(i) \ container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp) +#define PFTSYN_SEM_BYTES 4 #define PTP_SHARED_CLK_IDX_VALID BIT(31) +#define TS_CMD_MASK 0xF +#define SYNC_EXEC_CMD 0x3 #define ICE_PTP_TS_VALID BIT(0) +#define FIFO_EMPTY BIT(2) +#define FIFO_OK 0xFF +#define ICE_PTP_FIFO_NUM_CHECKS 5 /* Per-channel register definitions */ #define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8)) #define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8)) @@ -169,11 +195,13 @@ struct ice_ptp { #define N_PER_OUT_E810T 3 #define N_PER_OUT_E810T_NO_SMA 2 #define N_EXT_TS_E810_NO_SMA 2 +#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) struct ice_pf; int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr); int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); +void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena); int ice_get_ptp_clock_index(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); @@ 
-182,8 +210,11 @@ void ice_ptp_process_ts(struct ice_pf *pf); void ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb); +void ice_ptp_reset(struct ice_pf *pf); +void ice_ptp_prepare_for_reset(struct ice_pf *pf); void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); +int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) { @@ -195,6 +226,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) return -EOPNOTSUPP; } +static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { } static inline int ice_get_ptp_clock_index(struct ice_pf *pf) { return -1; @@ -210,7 +242,11 @@ static inline void ice_ptp_process_ts(struct ice_pf *pf) { } static inline void ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { } +static inline void ice_ptp_reset(struct ice_pf *pf) { } +static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { } static inline void ice_ptp_init(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { } +static inline int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +{ return 0; } #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ #endif /* _ICE_PTP_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h new file mode 100644 index 000000000000..4109aa3b2fcd --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -0,0 +1,374 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2021, Intel Corporation. */ + +#ifndef _ICE_PTP_CONSTS_H_ +#define _ICE_PTP_CONSTS_H_ + +/* Constant definitions related to the hardware clock used for PTP 1588 + * features and functionality. + */ +/* Constants defined for the PTP 1588 clock hardware. */ + +/* struct ice_time_ref_info_e822 + * + * E822 hardware can use different sources as the reference for the PTP + * hardware clock. Each clock has different characteristics such as a slightly + * different frequency, etc. + * + * This lookup table defines several constants that depend on the current time + * reference. See the struct ice_time_ref_info_e822 for information about the + * meaning of each constant. 
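One practical effect of the static inline stubs above: call sites need no #ifdef guards when CONFIG_PTP_1588_CLOCK is disabled. A hypothetical caller sketch (function name and placement are illustrative only; only the ice_ptp_link_change() signature is taken from this patch):

static void example_link_event(struct ice_pf *pf, bool link_up)
{
	/* compiles to nothing when PTP support is compiled out */
	if (ice_ptp_link_change(pf, pf->hw.pf_id, link_up))
		dev_dbg(ice_pf_to_dev(pf), "PTP not ready for link change\n");
}
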
+ */
+const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+	/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+	{
+		/* pll_freq */
+		823437500, /* 823.4375 MHz PLL */
+		/* nominal_incval */
+		0x136e44fabULL,
+		/* pps_delay */
+		11,
+	},
+
+	/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+	{
+		/* pll_freq */
+		783360000, /* 783.36 MHz */
+		/* nominal_incval */
+		0x146cc2177ULL,
+		/* pps_delay */
+		12,
+	},
+
+	/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+	{
+		/* pll_freq */
+		796875000, /* 796.875 MHz */
+		/* nominal_incval */
+		0x141414141ULL,
+		/* pps_delay */
+		12,
+	},
+
+	/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+	{
+		/* pll_freq */
+		816000000, /* 816 MHz */
+		/* nominal_incval */
+		0x139b9b9baULL,
+		/* pps_delay */
+		12,
+	},
+
+	/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+	{
+		/* pll_freq */
+		830078125, /* 830.078125 MHz */
+		/* nominal_incval */
+		0x134679aceULL,
+		/* pps_delay */
+		11,
+	},
+
+	/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+	{
+		/* pll_freq */
+		783360000, /* 783.36 MHz */
+		/* nominal_incval */
+		0x146cc2177ULL,
+		/* pps_delay */
+		12,
+	},
+};
+
+const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+	/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+	{
+		/* refclk_pre_div */
+		1,
+		/* feedback_div */
+		197,
+		/* frac_n_div */
+		2621440,
+		/* post_pll_div */
+		6,
+	},
+
+	/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		223,
+		/* frac_n_div */
+		524288,
+		/* post_pll_div */
+		7,
+	},
+
+	/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		223,
+		/* frac_n_div */
+		524288,
+		/* post_pll_div */
+		7,
+	},
+
+	/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		159,
+		/* frac_n_div */
+		1572864,
+		/* post_pll_div */
+		6,
+	},
+
+	/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+	{
+		/* refclk_pre_div */
+		5,
+		/* feedback_div */
+		159,
+		/* frac_n_div */
+		1572864,
+		/* post_pll_div */
+		6,
+	},
+
+	/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+	{
+		/* refclk_pre_div */
+		10,
+		/* feedback_div */
+		223,
+		/* frac_n_div */
+		524288,
+		/* post_pll_div */
+		7,
+	},
+};
+
+/* struct ice_vernier_info_e822
+ *
+ * E822 hardware calibrates the delay of the timestamp indication from the
+ * actual packet transmission or reception during the initialization of the
+ * PHY. To do this, the hardware mechanism uses some conversions between the
+ * various clocks within the PHY block. This table defines constants used to
+ * calculate the correct conversion ratios in the PHY registers.
+ *
+ * Many of the values relate to the PAR/PCS clock conversion registers. For
+ * these registers, a value of 0 means that the associated register is not
+ * used by this link speed, and that the register should be cleared by writing
+ * 0. Other values specify the clock frequency in Hz.
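The pll_freq and nominal_incval columns above are self-consistent if TUs are taken to be nanoseconds in 32.32 fixed point (matching the shift-by-32 usage elsewhere in this series): the nominal increment is one PLL period expressed in TUs, rounded to the nearest TU. A self-contained check for the 25 MHz row (the other rows work out the same way; the rounding convention is an inference from the table, not stated in the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long pll_freq = 823437500ULL;	/* 823.4375 MHz */
	unsigned long long ns_sec = 1000000000ULL;
	unsigned long long incval;

	/* one PLL period in 32.32 fixed-point ns, rounded to nearest */
	incval = (ns_sec * 4294967296ULL + pll_freq / 2) / pll_freq;

	assert(incval == 0x136e44fabULL);	/* matches nominal_incval */
	printf("0x%llx\n", incval);
	return 0;
}
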
+ */
+const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+	/* ICE_PTP_LNK_SPD_1G */
+	{
+		/* tx_par_clk */
+		31250000, /* 31.25 MHz */
+		/* rx_par_clk */
+		31250000, /* 31.25 MHz */
+		/* tx_pcs_clk */
+		125000000, /* 125 MHz */
+		/* rx_pcs_clk */
+		125000000, /* 125 MHz */
+		/* tx_desk_rsgb_par */
+		0, /* unused */
+		/* rx_desk_rsgb_par */
+		0, /* unused */
+		/* tx_desk_rsgb_pcs */
+		0, /* unused */
+		/* rx_desk_rsgb_pcs */
+		0, /* unused */
+		/* tx_fixed_delay */
+		25140,
+		/* pmd_adj_divisor */
+		10000000,
+		/* rx_fixed_delay */
+		17372,
+	},
+	/* ICE_PTP_LNK_SPD_10G */
+	{
+		/* tx_par_clk */
+		257812500, /* 257.8125 MHz */
+		/* rx_par_clk */
+		257812500, /* 257.8125 MHz */
+		/* tx_pcs_clk */
+		156250000, /* 156.25 MHz */
+		/* rx_pcs_clk */
+		156250000, /* 156.25 MHz */
+		/* tx_desk_rsgb_par */
+		0, /* unused */
+		/* rx_desk_rsgb_par */
+		0, /* unused */
+		/* tx_desk_rsgb_pcs */
+		0, /* unused */
+		/* rx_desk_rsgb_pcs */
+		0, /* unused */
+		/* tx_fixed_delay */
+		6938,
+		/* pmd_adj_divisor */
+		82500000,
+		/* rx_fixed_delay */
+		6212,
+	},
+	/* ICE_PTP_LNK_SPD_25G */
+	{
+		/* tx_par_clk */
+		644531250, /* 644.53125 MHz */
+		/* rx_par_clk */
+		644531250, /* 644.53125 MHz */
+		/* tx_pcs_clk */
+		390625000, /* 390.625 MHz */
+		/* rx_pcs_clk */
+		390625000, /* 390.625 MHz */
+		/* tx_desk_rsgb_par */
+		0, /* unused */
+		/* rx_desk_rsgb_par */
+		0, /* unused */
+		/* tx_desk_rsgb_pcs */
+		0, /* unused */
+		/* rx_desk_rsgb_pcs */
+		0, /* unused */
+		/* tx_fixed_delay */
+		2778,
+		/* pmd_adj_divisor */
+		206250000,
+		/* rx_fixed_delay */
+		2491,
+	},
+	/* ICE_PTP_LNK_SPD_25G_RS */
+	{
+		/* tx_par_clk */
+		0, /* unused */
+		/* rx_par_clk */
+		0, /* unused */
+		/* tx_pcs_clk */
+		0, /* unused */
+		/* rx_pcs_clk */
+		0, /* unused */
+		/* tx_desk_rsgb_par */
+		161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+		/* rx_desk_rsgb_par */
+		161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+		/* tx_desk_rsgb_pcs */
+		97656250, /* 97.65625 MHz Reed Solomon gearbox */
+		/* rx_desk_rsgb_pcs */
+		97656250, /* 97.65625 MHz Reed Solomon gearbox */
+		/* tx_fixed_delay */
+		3928,
+		/* pmd_adj_divisor */
+		206250000,
+		/* rx_fixed_delay */
+		29535,
+	},
+	/* ICE_PTP_LNK_SPD_40G */
+	{
+		/* tx_par_clk */
+		257812500,
+		/* rx_par_clk */
+		257812500,
+		/* tx_pcs_clk */
+		156250000, /* 156.25 MHz */
+		/* rx_pcs_clk */
+		156250000, /* 156.25 MHz */
+		/* tx_desk_rsgb_par */
+		0, /* unused */
+		/* rx_desk_rsgb_par */
+		156250000, /* 156.25 MHz deskew clock */
+		/* tx_desk_rsgb_pcs */
+		0, /* unused */
+		/* rx_desk_rsgb_pcs */
+		156250000, /* 156.25 MHz deskew clock */
+		/* tx_fixed_delay */
+		5666,
+		/* pmd_adj_divisor */
+		82500000,
+		/* rx_fixed_delay */
+		4244,
+	},
+	/* ICE_PTP_LNK_SPD_50G */
+	{
+		/* tx_par_clk */
+		644531250, /* 644.53125 MHz */
+		/* rx_par_clk */
+		644531250, /* 644.53125 MHz */
+		/* tx_pcs_clk */
+		390625000, /* 390.625 MHz */
+		/* rx_pcs_clk */
+		390625000, /* 390.625 MHz */
+		/* tx_desk_rsgb_par */
+		0, /* unused */
+		/* rx_desk_rsgb_par */
+		195312500, /* 195.3125 MHz deskew clock */
+		/* tx_desk_rsgb_pcs */
+		0, /* unused */
+		/* rx_desk_rsgb_pcs */
+		195312500, /* 195.3125 MHz deskew clock */
+		/* tx_fixed_delay */
+		2778,
+		/* pmd_adj_divisor */
+		206250000,
+		/* rx_fixed_delay */
+		2868,
+	},
+	/* ICE_PTP_LNK_SPD_50G_RS */
+	{
+		/* tx_par_clk */
+		0, /* unused */
+		/* rx_par_clk */
+		644531250, /* 644.53125 MHz */
+		/* tx_pcs_clk */
+		0, /* unused */
+		/* rx_pcs_clk */
+		644531250, /* 644.53125 MHz */
+		/* tx_desk_rsgb_par */
+		322265625, /* 322.265625 MHz Reed
Solomon gearbox */ + /* rx_desk_rsgb_par */ + 322265625, /* 322.265625 MHz Reed Solomon gearbox */ + /* tx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* tx_fixed_delay */ + 2095, + /* pmd_adj_divisor */ + 206250000, + /* rx_fixed_delay */ + 14524, + }, + /* ICE_PTP_LNK_SPD_100G_RS */ + { + /* tx_par_clk */ + 0, /* unused */ + /* rx_par_clk */ + 644531250, /* 644.53125 MHz */ + /* tx_pcs_clk */ + 0, /* unused */ + /* rx_pcs_clk */ + 644531250, /* 644.53125 MHz */ + /* tx_desk_rsgb_par */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_par */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* tx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* rx_desk_rsgb_pcs */ + 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + /* tx_fixed_delay */ + 1620, + /* pmd_adj_divisor */ + 206250000, + /* rx_fixed_delay */ + 7775, + }, +}; + +#endif /* _ICE_PTP_CONSTS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 29f947c0cd2e..ec8450f034e6 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -3,6 +3,8 @@ #include "ice_common.h" #include "ice_ptp_hw.h" +#include "ice_ptp_consts.h" +#include "ice_cgu_regs.h" /* Low level functions for interacting with and managing the device clock used * for the Precision Time Protocol. @@ -29,6 +31,15 @@ * * For E810 devices, the increment frequency is 812.5 MHz * + * For E822 devices the clock can be derived from different sources, and the + * increment has an effective frequency of one of the following: + * - 823.4375 MHz + * - 783.36 MHz + * - 796.875 MHz + * - 816 MHz + * - 830.078125 MHz + * - 783.36 MHz + * * The hardware captures timestamps in the PHY for incoming packets, and for * outgoing packets on request. To support this, the PHY maintains a timer * that matches the lower 64 bits of the global source timer. @@ -37,6 +48,24 @@ * shadow registers are used to prepare the desired initial values. A special * sync command is issued to trigger copying from the shadow registers into * the appropriate source and PHY registers simultaneously. + * + * The driver supports devices which have different PHYs with subtly different + * mechanisms to program and control the timers. We divide the devices into + * families named after the first major device, E810 and similar devices, and + * E822 and similar devices. + * + * - E822 based devices have additional support for fine grained Vernier + * calibration which requires significant setup + * - The layout of timestamp data in the PHY register blocks is different + * - The way timer synchronization commands are issued is different. + * + * To support this, very low level functions have an e810 or e822 suffix + * indicating what type of device they work on. Higher level abstractions for + * tasks that can be done on both devices do not have the suffix and will + * correctly look up the appropriate low level function when running. + * + * Functions which only make sense on a single device family may not have + * a suitable generic implementation */ /** @@ -51,6 +80,2447 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw) return hw->func_caps.ts_func_info.tmr_index_assoc; } +/** + * ice_ptp_read_src_incval - Read source timer increment value + * @hw: pointer to HW struct + * + * Read the increment value of the source timer and return it. 
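A sketch of the dispatch convention just described, using the timestamp read as an example (shape only; the real generic wrappers in this file carry additional context and error handling):

static int
ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
	/* pick the family-specific low level implementation at run time */
	if (ice_is_e810(hw))
		return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);

	return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
}
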
+ */ +static u64 ice_ptp_read_src_incval(struct ice_hw *hw) +{ + u32 lo, hi; + u8 tmr_idx; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + + return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo; +} + +/** + * ice_ptp_src_cmd - Prepare source timer for a timer command + * @hw: pointer to HW structure + * @cmd: Timer command + * + * Prepare the source timer for an upcoming timer sync command. + */ +static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val; + u8 tmr_idx; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + cmd_val = tmr_idx << SEL_CPK_SRC; + + switch (cmd) { + case INIT_TIME: + cmd_val |= GLTSYN_CMD_INIT_TIME; + break; + case INIT_INCVAL: + cmd_val |= GLTSYN_CMD_INIT_INCVAL; + break; + case ADJ_TIME: + cmd_val |= GLTSYN_CMD_ADJ_TIME; + break; + case ADJ_TIME_AT_TIME: + cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; + break; + case READ_TIME: + cmd_val |= GLTSYN_CMD_READ_TIME; + break; + } + + wr32(hw, GLTSYN_CMD, cmd_val); +} + +/** + * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands + * @hw: pointer to HW struct + * + * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the + * write immediately. This triggers the hardware to begin executing all of the + * source and PHY timer commands synchronously. + */ +static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) +{ + wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); + ice_flush(hw); +} + +/* E822 family functions + * + * The following functions operate on the E822 family of devices. + */ + +/** + * ice_fill_phy_msg_e822 - Fill message data for a PHY register access + * @msg: the PHY message buffer to fill in + * @port: the port to access + * @offset: the register offset + */ +static void +ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) +{ + int phy_port, phy, quadtype; + + phy_port = port % ICE_PORTS_PER_PHY; + phy = port / ICE_PORTS_PER_PHY; + quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE; + + if (quadtype == 0) { + msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); + msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port); + } else { + msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port); + msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); + } + + if (phy == 0) + msg->dest_dev = rmn_0; + else if (phy == 1) + msg->dest_dev = rmn_1; + else + msg->dest_dev = rmn_2; +} + +/** + * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 64bit register + * + * Checks if the provided low address is one of the known 64bit PHY values + * represented as two 32bit registers. If it is, return the appropriate high + * register offset to use. 
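Putting ice_ptp_src_cmd() and ice_ptp_exec_tmr_cmd() together, the prepare-then-trigger flow looks roughly like this (a sketch; the GLTSYN_SHADJ shadow-register writes are based on how ice_ptp_write_incval() programs the increment value, and the PHY-port preparation step is elided):

static void example_sync_incval(struct ice_hw *hw, u64 incval)
{
	u8 tmr_idx = ice_get_ptp_src_clock_index(hw);

	/* 1) stage the new value in the source timer shadow registers */
	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));

	/* 2) arm INIT_INCVAL on the source timer (PHY ports elided) */
	ice_ptp_src_cmd(hw, INIT_INCVAL);

	/* 3) one synchronous trigger latches every shadow at once */
	ice_ptp_exec_tmr_cmd(hw);
}
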
+ */ +static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case P_REG_PAR_PCS_TX_OFFSET_L: + *high_addr = P_REG_PAR_PCS_TX_OFFSET_U; + return true; + case P_REG_PAR_PCS_RX_OFFSET_L: + *high_addr = P_REG_PAR_PCS_RX_OFFSET_U; + return true; + case P_REG_PAR_TX_TIME_L: + *high_addr = P_REG_PAR_TX_TIME_U; + return true; + case P_REG_PAR_RX_TIME_L: + *high_addr = P_REG_PAR_RX_TIME_U; + return true; + case P_REG_TOTAL_TX_OFFSET_L: + *high_addr = P_REG_TOTAL_TX_OFFSET_U; + return true; + case P_REG_TOTAL_RX_OFFSET_L: + *high_addr = P_REG_TOTAL_RX_OFFSET_U; + return true; + case P_REG_UIX66_10G_40G_L: + *high_addr = P_REG_UIX66_10G_40G_U; + return true; + case P_REG_UIX66_25G_100G_L: + *high_addr = P_REG_UIX66_25G_100G_U; + return true; + case P_REG_TX_CAPTURE_L: + *high_addr = P_REG_TX_CAPTURE_U; + return true; + case P_REG_RX_CAPTURE_L: + *high_addr = P_REG_RX_CAPTURE_U; + return true; + case P_REG_TX_TIMER_INC_PRE_L: + *high_addr = P_REG_TX_TIMER_INC_PRE_U; + return true; + case P_REG_RX_TIMER_INC_PRE_L: + *high_addr = P_REG_RX_TIMER_INC_PRE_U; + return true; + default: + return false; + } +} + +/** + * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 40bit value + * + * Checks if the provided low address is one of the known 40bit PHY values + * split into two registers with the lower 8 bits in the low register and the + * upper 32 bits in the high register. If it is, return the appropriate high + * register offset to use. + */ +static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case P_REG_TIMETUS_L: + *high_addr = P_REG_TIMETUS_U; + return true; + case P_REG_PAR_RX_TUS_L: + *high_addr = P_REG_PAR_RX_TUS_U; + return true; + case P_REG_PAR_TX_TUS_L: + *high_addr = P_REG_PAR_TX_TUS_U; + return true; + case P_REG_PCS_RX_TUS_L: + *high_addr = P_REG_PCS_RX_TUS_U; + return true; + case P_REG_PCS_TX_TUS_L: + *high_addr = P_REG_PCS_TX_TUS_U; + return true; + case P_REG_DESK_PAR_RX_TUS_L: + *high_addr = P_REG_DESK_PAR_RX_TUS_U; + return true; + case P_REG_DESK_PAR_TX_TUS_L: + *high_addr = P_REG_DESK_PAR_TX_TUS_U; + return true; + case P_REG_DESK_PCS_RX_TUS_L: + *high_addr = P_REG_DESK_PCS_RX_TUS_U; + return true; + case P_REG_DESK_PCS_TX_TUS_L: + *high_addr = P_REG_DESK_PCS_TX_TUS_U; + return true; + default: + return false; + } +} + +/** + * ice_read_phy_reg_e822 - Read a PHY register + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @offset: PHY register offset to read + * @val: on return, the contents read from the PHY + * + * Read a PHY register for the given port over the device sideband queue. + */ +int +ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) +{ + struct ice_sbq_msg_input msg = {0}; + int err; + + ice_fill_phy_msg_e822(&msg, port, offset); + msg.opcode = ice_sbq_msg_rd; + + err = ice_sbq_rw_reg(hw, &msg); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", + err); + return err; + } + + *val = msg.data; + + return 0; +} + +/** + * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * + * Reads the two registers associated with a 64bit value and returns it in the + * val pointer. 
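The two split layouts these helpers distinguish, checked with concrete values (a standalone snippet, not driver code; the 0xff mask and shift of 8 correspond to P_REG_40B_LOW_M and P_REG_40B_HIGH_S used by the write path below):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val40 = 0x123456789aULL;	/* a 40-bit quantity */
	uint64_t val64 = 0x0123456789abcdefULL;

	/* 40-bit registers: low 8 bits in _L, upper 32 bits in _U */
	uint32_t lo40 = val40 & 0xff;
	uint32_t hi40 = val40 >> 8;
	assert(((uint64_t)hi40 << 8 | lo40) == val40);

	/* 64-bit registers: plain low/high 32-bit halves */
	uint32_t lo64 = (uint32_t)val64;
	uint32_t hi64 = val64 >> 32;
	assert(((uint64_t)hi64 << 32 | lo64) == val64);
	return 0;
}
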
The offset always specifies the lower register offset to use. + * The high offset is looked up. This function only operates on registers + * known to be two parts of a 64bit value. + */ +static int +ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) +{ + u32 low, high; + u16 high_addr; + int err; + + /* Only operate on registers known to be split into two 32bit + * registers. + */ + if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", + low_addr); + return -EINVAL; + } + + err = ice_read_phy_reg_e822(hw, port, low_addr, &low); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_read_phy_reg_e822(hw, port, high_addr, &high); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d", + high_addr, err); + return err; + } + + *val = (u64)high << 32 | low; + + return 0; +} + +/** + * ice_write_phy_reg_e822 - Write a PHY register + * @hw: pointer to the HW struct + * @port: PHY port to write to + * @offset: PHY register offset to write + * @val: The value to write to the register + * + * Write a PHY register for the given port over the device sideband queue. + */ +int +ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) +{ + struct ice_sbq_msg_input msg = {0}; + int err; + + ice_fill_phy_msg_e822(&msg, port, offset); + msg.opcode = ice_sbq_msg_wr; + msg.data = val; + + err = ice_sbq_rw_reg(hw, &msg); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", + err); + return err; + } + + return 0; +} + +/** + * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * + * Write the provided 40b value to the two associated registers by splitting + * it up into two chunks, the lower 8 bits and the upper 32 bits. + */ +static int +ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +{ + u32 low, high; + u16 high_addr; + int err; + + /* Only operate on registers known to be split into a lower 8 bit + * register and an upper 32 bit register. + */ + if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) { + ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n", + low_addr); + return -EINVAL; + } + + low = (u32)(val & P_REG_40B_LOW_M); + high = (u32)(val >> P_REG_40B_HIGH_S); + + err = ice_write_phy_reg_e822(hw, port, low_addr, low); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_write_phy_reg_e822(hw, port, high_addr, high); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: the contents of the 64bit value to write to PHY + * + * Write the 64bit value to the two associated 32bit PHY registers. The offset + * is always specified as the lower register, and the high address is looked + * up. This function only operates on registers known to be two parts of + * a 64bit value. 
+ */ +static int +ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) +{ + u32 low, high; + u16 high_addr; + int err; + + /* Only operate on registers known to be split into two 32bit + * registers. + */ + if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { + ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", + low_addr); + return -EINVAL; + } + + low = lower_32_bits(val); + high = upper_32_bits(val); + + err = ice_write_phy_reg_e822(hw, port, low_addr, low); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_write_phy_reg_e822(hw, port, high_addr, high); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_fill_quad_msg_e822 - Fill message data for quad register access + * @msg: the PHY message buffer to fill in + * @quad: the quad to access + * @offset: the register offset + * + * Fill a message buffer for accessing a register in a quad shared between + * multiple PHYs. + */ +static void +ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) +{ + u32 addr; + + msg->dest_dev = rmn_0; + + if ((quad % ICE_NUM_QUAD_TYPE) == 0) + addr = Q_0_BASE + offset; + else + addr = Q_1_BASE + offset; + + msg->msg_addr_low = lower_16_bits(addr); + msg->msg_addr_high = upper_16_bits(addr); +} + +/** + * ice_read_quad_reg_e822 - Read a PHY quad register + * @hw: pointer to the HW struct + * @quad: quad to read from + * @offset: quad register offset to read + * @val: on return, the contents read from the quad + * + * Read a quad register over the device sideband queue. Quad registers are + * shared between multiple PHYs. + */ +int +ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) +{ + struct ice_sbq_msg_input msg = {0}; + int err; + + if (quad >= ICE_MAX_QUAD) + return -EINVAL; + + ice_fill_quad_msg_e822(&msg, quad, offset); + msg.opcode = ice_sbq_msg_rd; + + err = ice_sbq_rw_reg(hw, &msg); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", + err); + return err; + } + + *val = msg.data; + + return 0; +} + +/** + * ice_write_quad_reg_e822 - Write a PHY quad register + * @hw: pointer to the HW struct + * @quad: quad to write to + * @offset: quad register offset to write + * @val: The value to write to the register + * + * Write a quad register over the device sideband queue. Quad registers are + * shared between multiple PHYs. + */ +int +ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) +{ + struct ice_sbq_msg_input msg = {0}; + int err; + + if (quad >= ICE_MAX_QUAD) + return -EINVAL; + + ice_fill_quad_msg_e822(&msg, quad, offset); + msg.opcode = ice_sbq_msg_wr; + msg.data = val; + + err = ice_sbq_rw_reg(hw, &msg); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", + err); + return err; + } + + return 0; +} + +/** + * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block + * @hw: pointer to the HW struct + * @quad: the quad to read from + * @idx: the timestamp index to read + * @tstamp: on return, the 40bit timestamp value + * + * Read a 40bit timestamp value out of the two associated registers in the + * quad memory block that is shared between the internal PHYs of the E822 + * family of devices. 
+ */ +static int +ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) +{ + u16 lo_addr, hi_addr; + u32 lo, hi; + int err; + + lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); + hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); + + err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", + err); + return err; + } + + err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", + err); + return err; + } + + /* For E822 based internal PHYs, the timestamp is reported with the + * lower 8 bits in the low register, and the upper 32 bits in the high + * register. + */ + *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); + + return 0; +} + +/** + * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block + * @hw: pointer to the HW struct + * @quad: the quad to read from + * @idx: the timestamp index to reset + * + * Clear a timestamp, resetting its valid bit, from the PHY quad block that is + * shared between the internal PHYs on the E822 devices. + */ +static int +ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) +{ + u16 lo_addr, hi_addr; + int err; + + lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); + hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); + + err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n", + err); + return err; + } + + err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n", + err); + return err; + } + + return 0; +} + +/** + * ice_read_cgu_reg_e822 - Read a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to read + * @val: storage for register value read + * + * Read the contents of a register of the Clock Generation Unit. Only + * applicable to E822 devices. + */ +static int +ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) +{ + struct ice_sbq_msg_input cgu_msg; + int err; + + cgu_msg.opcode = ice_sbq_msg_rd; + cgu_msg.dest_dev = cgu; + cgu_msg.msg_addr_low = addr; + cgu_msg.msg_addr_high = 0x0; + + err = ice_sbq_rw_reg(hw, &cgu_msg); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + *val = cgu_msg.data; + + return err; +} + +/** + * ice_write_cgu_reg_e822 - Write a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to write + * @val: value to write into the register + * + * Write the specified value to a register of the Clock Generation Unit. Only + * applicable to E822 devices. + */ +static int +ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val) +{ + struct ice_sbq_msg_input cgu_msg; + int err; + + cgu_msg.opcode = ice_sbq_msg_wr; + cgu_msg.dest_dev = cgu; + cgu_msg.msg_addr_low = addr; + cgu_msg.msg_addr_high = 0x0; + cgu_msg.data = val; + + err = ice_sbq_rw_reg(hw, &cgu_msg); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + return err; +} + +/** + * ice_clk_freq_str - Convert time_ref_freq to string + * @clk_freq: Clock frequency + * + * Convert the specified TIME_REF clock frequency to a string. 
+ */ +static const char *ice_clk_freq_str(u8 clk_freq) +{ + switch ((enum ice_time_ref_freq)clk_freq) { + case ICE_TIME_REF_FREQ_25_000: + return "25 MHz"; + case ICE_TIME_REF_FREQ_122_880: + return "122.88 MHz"; + case ICE_TIME_REF_FREQ_125_000: + return "125 MHz"; + case ICE_TIME_REF_FREQ_153_600: + return "153.6 MHz"; + case ICE_TIME_REF_FREQ_156_250: + return "156.25 MHz"; + case ICE_TIME_REF_FREQ_245_760: + return "245.76 MHz"; + default: + return "Unknown"; + } +} + +/** + * ice_clk_src_str - Convert time_ref_src to string + * @clk_src: Clock source + * + * Convert the specified clock source to its string name. + */ +static const char *ice_clk_src_str(u8 clk_src) +{ + switch ((enum ice_clk_src)clk_src) { + case ICE_CLK_SRC_TCX0: + return "TCX0"; + case ICE_CLK_SRC_TIME_REF: + return "TIME_REF"; + default: + return "Unknown"; + } +} + +/** + * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCX0) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. + */ +static int +ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_bwm_lf bwm_lf; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int err; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCX0 && + clk_freq != ICE_TIME_REF_FREQ_25_000) { + dev_warn(ice_hw_to_dev(hw), + "TCX0 only supports 25 MHz frequency\n"); + return -EINVAL; + } + + err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.field.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw24.field.time_ref_sel), + ice_clk_freq_str(dw9.field.time_ref_freq_sel), + bwm_lf.field.plllock_true_lock_cri ? 
"locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw24.field.ts_pll_enable) { + dw24.field.ts_pll_enable = 0; + + err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + } + + /* Set the frequency */ + dw9.field.time_ref_freq_sel = clk_freq; + err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val); + if (err) + return err; + + /* Configure the TS PLL feedback divisor */ + err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val); + if (err) + return err; + + dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; + dw19.field.tspll_ndivratio = 1; + + err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val); + if (err) + return err; + + /* Configure the TS PLL post divisor */ + err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val); + if (err) + return err; + + dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; + dw22.field.time1588clk_sel_div2 = 0; + + err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val); + if (err) + return err; + + /* Configure the TS PLL pre divisor and clock source */ + err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; + dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; + dw24.field.time_ref_sel = clk_src; + + err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Finally, enable the PLL */ + dw24.field.ts_pll_enable = 1; + + err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Wait to verify if the PLL locks */ + usleep_range(1000, 5000); + + err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + if (!bwm_lf.field.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.field.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw24.field.time_ref_sel), + ice_clk_freq_str(dw9.field.time_ref_freq_sel), + bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +/** + * ice_init_cgu_e822 - Initialize CGU with settings from firmware + * @hw: pointer to the HW structure + * + * Initialize the Clock Generation Unit of the E822 device. + */ +static int ice_init_cgu_e822(struct ice_hw *hw) +{ + struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; + union tspll_cntr_bist_settings cntr_bist; + int err; + + err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + &cntr_bist.val); + if (err) + return err; + + /* Disable sticky lock detection so lock err reported is accurate */ + cntr_bist.field.i_plllock_sel_0 = 0; + cntr_bist.field.i_plllock_sel_1 = 0; + + err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS, + cntr_bist.val); + if (err) + return err; + + /* Configure the CGU PLL using the parameters from the function + * capabilities. + */ + err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + if (err) + return err; + + return 0; +} + +/** + * ice_ptp_set_vernier_wl - Set the window length for vernier calibration + * @hw: pointer to the HW struct + * + * Set the window length used for the vernier port calibration process. 
+ */ +static int ice_ptp_set_vernier_wl(struct ice_hw *hw) +{ + u8 port; + + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + int err; + + err = ice_write_phy_reg_e822(hw, port, P_REG_WL, + PTP_VERNIER_WL); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n", + port, err); + return err; + } + } + + return 0; +} + +/** + * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform PHC initialization steps specific to E822 devices. + */ +static int ice_ptp_init_phc_e822(struct ice_hw *hw) +{ + int err; + u32 regval; + + /* Enable reading switch and PHY registers over the sideband queue */ +#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1) +#define PF_SB_REM_DEV_CTL_PHY0 BIT(2) + regval = rd32(hw, PF_SB_REM_DEV_CTL); + regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ | + PF_SB_REM_DEV_CTL_PHY0); + wr32(hw, PF_SB_REM_DEV_CTL, regval); + + /* Initialize the Clock Generation Unit */ + err = ice_init_cgu_e822(hw); + if (err) + return err; + + /* Set window length for all the ports */ + return ice_ptp_set_vernier_wl(hw); +} + +/** + * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time + * @hw: pointer to the HW struct + * @time: Time to initialize the PHY port clocks to + * + * Program the PHY port registers with a new initial time value. The port + * clock will be initialized once the driver issues an INIT_TIME sync + * command. The time value is the upper 32 bits of the PHY timer, usually in + * units of nominal nanoseconds. + */ +static int +ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) +{ + u64 phy_time; + u8 port; + int err; + + /* The time represents the upper 32 bits of the PHY timer, so we need + * to shift to account for this when programming. + */ + phy_time = (u64)time << 32; + + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + /* Tx case */ + err = ice_write_64b_phy_reg_e822(hw, port, + P_REG_TX_TIMER_INC_PRE_L, + phy_time); + if (err) + goto exit_err; + + /* Rx case */ + err = ice_write_64b_phy_reg_e822(hw, port, + P_REG_RX_TIMER_INC_PRE_L, + phy_time); + if (err) + goto exit_err; + } + + return 0; + +exit_err: + ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n", + port, err); + + return err; +} + +/** + * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust + * @hw: pointer to HW struct + * @port: Port number to be programmed + * @time: time in cycles to adjust the port Tx and Rx clocks + * + * Program the port for an atomic adjustment by writing the Tx and Rx timer + * registers. The atomic adjustment won't be completed until the driver issues + * an ADJ_TIME command. + * + * Note that time is not in units of nanoseconds. It is in clock time + * including the lower sub-nanosecond portion of the port timer. + * + * Negative adjustments are supported using 2s complement arithmetic. 
+ */ +int +ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time) +{ + u32 l_time, u_time; + int err; + + l_time = lower_32_bits(time); + u_time = upper_32_bits(time); + + /* Tx case */ + err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + /* Rx case */ + err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + return 0; + +exit_err: + ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n", + port, err); + return err; +} + +/** + * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment + * @hw: pointer to HW struct + * @adj: adjustment in nanoseconds + * + * Prepare the PHY ports for an atomic time adjustment by programming the PHY + * Tx and Rx port registers. The actual adjustment is completed by issuing an + * ADJ_TIME or ADJ_TIME_AT_TIME sync command. + */ +static int +ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj) +{ + s64 cycles; + u8 port; + + /* The port clock supports adjustment of the sub-nanosecond portion of + * the clock. We shift the provided adjustment in nanoseconds to + * calculate the appropriate adjustment to program into the PHY ports. + */ + if (adj > 0) + cycles = (s64)adj << 32; + else + cycles = -(((s64)-adj) << 32); + + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + int err; + + err = ice_ptp_prep_port_adj_e822(hw, port, cycles); + if (err) + return err; + } + + return 0; +} + +/** + * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment + * @hw: pointer to HW struct + * @incval: new increment value to prepare + * + * Prepare each of the PHY ports for a new increment value by programming the + * port's TIMETUS registers. The new increment value will be updated after + * issuing an INIT_INCVAL command. + */ +static int +ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval) +{ + int err; + u8 port; + + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, + incval); + if (err) + goto exit_err; + } + + return 0; + +exit_err: + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n", + port, err); + + return err; +} + +/** + * ice_ptp_read_port_capture - Read a port's local time capture + * @hw: pointer to HW struct + * @port: Port number to read + * @tx_ts: on return, the Tx port time capture + * @rx_ts: on return, the Rx port time capture + * + * Read the port's Tx and Rx local time capture values. + * + * Note this has no equivalent for the E810 devices. 
+ */ +static int +ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) +{ + int err; + + /* Tx case */ + err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n", + (unsigned long long)*tx_ts); + + /* Rx case */ + err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n", + (unsigned long long)*rx_ts); + + return 0; +} + +/** + * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command + * @hw: pointer to HW struct + * @port: Port to which cmd has to be sent + * @cmd: Command to be sent to the port + * + * Prepare the requested port for an upcoming timer sync command. + * + * Note there is no equivalent of this operation on E810, as that device + * always handles all external PHYs internally. + */ +static int +ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val, val; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + cmd_val = tmr_idx << SEL_PHY_SRC; + switch (cmd) { + case INIT_TIME: + cmd_val |= PHY_CMD_INIT_TIME; + break; + case INIT_INCVAL: + cmd_val |= PHY_CMD_INIT_INCVAL; + break; + case ADJ_TIME: + cmd_val |= PHY_CMD_ADJ_TIME; + break; + case READ_TIME: + cmd_val |= PHY_CMD_READ_TIME; + break; + case ADJ_TIME_AT_TIME: + cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; + break; + } + + /* Tx case */ + /* Read, modify, write */ + err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", + err); + return err; + } + + /* Modify necessary bits only and perform write */ + val &= ~TS_CMD_MASK; + val |= cmd_val; + + err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", + err); + return err; + } + + /* Rx case */ + /* Read, modify, write */ + err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", + err); + return err; + } + + /* Modify necessary bits only and perform write */ + val &= ~TS_CMD_MASK; + val |= cmd_val; + + err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", + err); + return err; + } + + return 0; +} + +/** + * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command + * @hw: pointer to the HW struct + * @cmd: timer command to prepare + * + * Prepare all ports connected to this device for an upcoming timer sync + * command. + */ +static int +ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u8 port; + + for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + int err; + + err = ice_ptp_one_port_cmd(hw, port, cmd); + if (err) + return err; + } + + return 0; +} + +/* E822 Vernier calibration functions + * + * The following functions are used as part of the vernier calibration of + * a port. This calibration increases the precision of the timestamps on the + * port. 
+ */
+
+/**
+ * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * @hw: pointer to HW struct
+ * @port: the port to read from
+ * @link_out: if non-NULL, holds link speed on success
+ * @fec_out: if non-NULL, holds FEC algorithm on success
+ *
+ * Read the serdes data for the PHY port and extract the link speed and FEC
+ * algorithm.
+ */
+static int
+ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+			       enum ice_ptp_link_spd *link_out,
+			       enum ice_ptp_fec_mode *fec_out)
+{
+	enum ice_ptp_link_spd link;
+	enum ice_ptp_fec_mode fec;
+	u32 serdes;
+	int err;
+
+	err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
+		return err;
+	}
+
+	/* Determine the FEC algorithm */
+	fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
+
+	serdes &= P_REG_LINK_SPEED_SERDES_M;
+
+	/* Determine the link speed */
+	if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
+		switch (serdes) {
+		case ICE_PTP_SERDES_25G:
+			link = ICE_PTP_LNK_SPD_25G_RS;
+			break;
+		case ICE_PTP_SERDES_50G:
+			link = ICE_PTP_LNK_SPD_50G_RS;
+			break;
+		case ICE_PTP_SERDES_100G:
+			link = ICE_PTP_LNK_SPD_100G_RS;
+			break;
+		default:
+			return -EIO;
+		}
+	} else {
+		switch (serdes) {
+		case ICE_PTP_SERDES_1G:
+			link = ICE_PTP_LNK_SPD_1G;
+			break;
+		case ICE_PTP_SERDES_10G:
+			link = ICE_PTP_LNK_SPD_10G;
+			break;
+		case ICE_PTP_SERDES_25G:
+			link = ICE_PTP_LNK_SPD_25G;
+			break;
+		case ICE_PTP_SERDES_40G:
+			link = ICE_PTP_LNK_SPD_40G;
+			break;
+		case ICE_PTP_SERDES_50G:
+			link = ICE_PTP_LNK_SPD_50G;
+			break;
+		default:
+			return -EIO;
+		}
+	}
+
+	if (link_out)
+		*link_out = link;
+	if (fec_out)
+		*fec_out = fec;
+
+	return 0;
+}
+
+/**
+ * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * @hw: pointer to HW struct
+ * @port: the port to configure the quad for
+ */
+static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+{
+	enum ice_ptp_link_spd link_spd;
+	int err;
+	u32 val;
+	u8 quad;
+
+	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
+			  err);
+		return;
+	}
+
+	quad = port / ICE_PORTS_PER_QUAD;
+
+	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GBL_CFG, err %d\n",
+			  err);
+		return;
+	}
+
+	if (link_spd >= ICE_PTP_LNK_SPD_40G)
+		val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+	else
+		val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+
+	err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
+			  err);
+		return;
+	}
+}
+
+/**
+ * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * @hw: pointer to the HW structure
+ * @port: the port to configure
+ *
+ * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
+ * hardware clock time units (TUs). That is, determine the number of TUs per
+ * serdes unit interval, and program the UIX registers with this conversion.
+ *
+ * This conversion is used as part of the calibration process when determining
+ * the additional error of a timestamp vs the real time of transmission or
+ * receipt of the packet.
+ *
+ * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
+ * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
+ *
+ * To calculate the conversion ratio, we use the following facts:
+ *
+ * a) the clock frequency in Hz (cycles per second)
+ * b) the number of TUs per cycle (the increment value of the clock)
+ * c) 1 second per 1 billion nanoseconds
+ * d) the duration of 66 UIs in nanoseconds
+ *
+ * Given these facts, we can use the following table to work out what ratios
+ * to multiply in order to get the number of TUs per 66 UIs:
+ *
+ *	cycles | 1 second     | incval (TUs) | nanoseconds
+ *	-------+--------------+--------------+-------------
+ *	second | 1 billion ns | cycle        | 66 UIs
+ *
+ * To perform the multiplication using integers without too much loss of
+ * precision, we can use the following equation:
+ *
+ *	(freq * incval * 6600 LINE_UI ) / ( 100 * 1 billion)
+ *
+ * We scale up to using 6600 UI instead of 66 in order to avoid fractional
+ * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
+ *
+ * The increment value has a maximum expected range of about 34 bits, while
+ * the frequency value is about 29 bits. Multiplying these values shouldn't
+ * overflow the 64 bits. However, we must then further multiply them again by
+ * the Serdes unit interval duration. To avoid overflow here, we split the
+ * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
+ * a divide by 390,625,000. This does lose some precision, but avoids
+ * miscalculation due to arithmetic overflow.
+ */
+static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+{
+	u64 cur_freq, clk_incval, tu_per_sec, uix;
+	int err;
+
+	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+	clk_incval = ice_ptp_read_src_incval(hw);
+
+	/* Calculate TUs per second divided by 256 */
+	tu_per_sec = (cur_freq * clk_incval) >> 8;
+
+#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
+#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
+
+	/* Program the 10Gb/40Gb conversion ratio */
+	uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
+
+	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+					 uix);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
+			  err);
+		return err;
+	}
+
+	/* Program the 25Gb/100Gb conversion ratio */
+	uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
+
+	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+					 uix);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
+			  err);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process. This depends on the link speed, as the PHY
+ * uses different markers depending on the speed.
+ *
+ * 1Gb/10Gb/25Gb:
+ * - Tx/Rx PAR/PCS markers
+ *
+ * 25Gb RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ *
+ * 40Gb/50Gb:
+ * - Tx/Rx PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ *
+ * 50G RS and 100GB RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ * - Tx PAR/PCS markers
+ *
+ * To calculate the conversion, we use the PHC clock frequency (cycles per
+ * second), the increment value (TUs per cycle), and the related PHY clock
+ * frequency to calculate the TUs per unit of the PHY link clock.
The + * following table shows how the units convert: + * + * cycles | TUs | second + * -------+-------+-------- + * second | cycle | cycles + * + * For each conversion register, look up the appropriate frequency from the + * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program + * this to the appropriate register, preparing hardware to perform timestamp + * calibration to calculate the total Tx or Rx offset to adjust the timestamp + * in order to calibrate for the internal PHY delays. + * + * Note that the increment value ranges up to ~34 bits, and the clock + * frequency is ~29 bits, so multiplying them together should fit within the + * 64 bit arithmetic. + */ +static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) +{ + u64 cur_freq, clk_incval, tu_per_sec, phy_tus; + enum ice_ptp_link_spd link_spd; + enum ice_ptp_fec_mode fec_mode; + int err; + + err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + if (err) + return err; + + cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw)); + clk_incval = ice_ptp_read_src_incval(hw); + + /* Calculate TUs per cycle of the PHC clock */ + tu_per_sec = cur_freq * clk_incval; + + /* For each PHY conversion register, look up the appropriate link + * speed frequency and determine the TUs per that clock's cycle time. + * Split this into a high and low value and then program the + * appropriate register. If that link speed does not use the + * associated register, write zeros to clear it instead. + */ + + /* P_REG_PAR_TX_TUS */ + if (e822_vernier[link_spd].tx_par_clk) + phy_tus = div_u64(tu_per_sec, + e822_vernier[link_spd].tx_par_clk); + else + phy_tus = 0; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L, + phy_tus); + if (err) + return err; + + /* P_REG_PAR_RX_TUS */ + if (e822_vernier[link_spd].rx_par_clk) + phy_tus = div_u64(tu_per_sec, + e822_vernier[link_spd].rx_par_clk); + else + phy_tus = 0; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L, + phy_tus); + if (err) + return err; + + /* P_REG_PCS_TX_TUS */ + if (e822_vernier[link_spd].tx_pcs_clk) + phy_tus = div_u64(tu_per_sec, + e822_vernier[link_spd].tx_pcs_clk); + else + phy_tus = 0; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L, + phy_tus); + if (err) + return err; + + /* P_REG_PCS_RX_TUS */ + if (e822_vernier[link_spd].rx_pcs_clk) + phy_tus = div_u64(tu_per_sec, + e822_vernier[link_spd].rx_pcs_clk); + else + phy_tus = 0; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L, + phy_tus); + if (err) + return err; + + /* P_REG_DESK_PAR_TX_TUS */ + if (e822_vernier[link_spd].tx_desk_rsgb_par) + phy_tus = div_u64(tu_per_sec, + e822_vernier[link_spd].tx_desk_rsgb_par); + else + phy_tus = 0; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L, + phy_tus); + if (err) + return err; + + /* P_REG_DESK_PAR_RX_TUS */ + if (e822_vernier[link_spd].rx_desk_rsgb_par) + phy_tus = div_u64(tu_per_sec, + e822_vernier[link_spd].rx_desk_rsgb_par); + else + phy_tus = 0; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L, + phy_tus); + if (err) + return err; + + /* P_REG_DESK_PCS_TX_TUS */ + if (e822_vernier[link_spd].tx_desk_rsgb_pcs) + phy_tus = div_u64(tu_per_sec, + e822_vernier[link_spd].tx_desk_rsgb_pcs); + else + phy_tus = 0; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L, + phy_tus); + if (err) + return err; + + /* P_REG_DESK_PCS_RX_TUS */ + if (e822_vernier[link_spd].rx_desk_rsgb_pcs) + phy_tus = div_u64(tu_per_sec, + 
e822_vernier[link_spd].rx_desk_rsgb_pcs);
+	else
+		phy_tus = 0;
+
+	return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+					  phy_tus);
+}
+
+/**
+ * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
+ * @hw: pointer to the HW struct
+ * @link_spd: the link speed to calculate for
+ *
+ * Calculate the fixed offset due to known static latency data.
+ */
+static u64
+ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+	clk_incval = ice_ptp_read_src_incval(hw);
+
+	/* Calculate TUs per second */
+	tu_per_sec = cur_freq * clk_incval;
+
+	/* Calculate number of TUs to add for the fixed Tx latency. Since the
+	 * latency measurement is in 1/100th of a nanosecond, we need to
+	 * multiply by tu_per_sec and then divide by 1e11. This calculation
+	 * overflows 64 bit integer arithmetic, so break it up into two
+	 * divisions by 1e4 first then by 1e7.
+	 */
+	fixed_offset = div_u64(tu_per_sec, 10000);
+	fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
+	fixed_offset = div_u64(fixed_offset, 10000000);
+
+	return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
+ * adjust Tx timestamps by. This is calculated by combining some known static
+ * latency along with the Vernier offset computations done by hardware.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+	enum ice_ptp_link_spd link_spd;
+	enum ice_ptp_fec_mode fec_mode;
+	u64 total_offset, val;
+	int err;
+
+	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+	if (err)
+		return err;
+
+	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+	/* Read the first Vernier offset from the PHY register and add it to
+	 * the total offset.
+	 */
+	if (link_spd == ICE_PTP_LNK_SPD_1G ||
+	    link_spd == ICE_PTP_LNK_SPD_10G ||
+	    link_spd == ICE_PTP_LNK_SPD_25G ||
+	    link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+	    link_spd == ICE_PTP_LNK_SPD_40G ||
+	    link_spd == ICE_PTP_LNK_SPD_50G) {
+		err = ice_read_64b_phy_reg_e822(hw, port,
+						P_REG_PAR_PCS_TX_OFFSET_L,
+						&val);
+		if (err)
+			return err;
+
+		total_offset += val;
+	}
+
+	/* For Tx, we only need to use the second Vernier offset for
+	 * multi-lane link speeds with RS-FEC. The lanes will always be
+	 * aligned.
+	 */
+	if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+		err = ice_read_64b_phy_reg_e822(hw, port,
+						P_REG_PAR_TX_TIME_L,
+						&val);
+		if (err)
+			return err;
+
+		total_offset += val;
+	}
+
+	/* Now that the total offset has been calculated, program it to the
+	 * PHY and indicate that the Tx offset is ready. After this,
+	 * timestamps will be enabled.
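+	 *
+	 * Editorial illustration (not part of the patch) of how the value
+	 * programmed below is composed:
+	 *
+	 *	total_offset = fixed offset for link_spd
+	 *		     + P_REG_PAR_PCS_TX_OFFSET  (first Vernier offset)
+	 *		     + P_REG_PAR_TX_TIME        (RS multi-lane only)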
+	 */
+	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+					 total_offset);
+	if (err)
+		return err;
+
+	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/**
+ * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Calculate and program the fixed Tx offset, and indicate that the offset is
+ * ready. This can be used when operating in bypass mode.
+ */
+static int
+ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+	enum ice_ptp_link_spd link_spd;
+	enum ice_ptp_fec_mode fec_mode;
+	u64 total_offset;
+	int err;
+
+	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+	if (err)
+		return err;
+
+	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+	/* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L
+	 * register, then indicate that the Tx offset is ready. After this,
+	 * timestamps will be enabled.
+	 *
+	 * Note that this skips including the more precise offsets generated
+	 * by the Vernier calibration.
+	 */
+	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+					 total_offset);
+	if (err)
+		return err;
+
+	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/**
+ * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to adjust for
+ * @link_spd: the current link speed of the PHY
+ * @fec_mode: the current FEC mode of the PHY
+ * @pmd_adj: on return, the amount to adjust the Rx total offset by
+ *
+ * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
+ * This varies by link speed and FEC mode. The value calculated accounts for
+ * various delays caused when receiving a packet.
+ */
+static int
+ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+			  enum ice_ptp_link_spd link_spd,
+			  enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
+{
+	u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
+	u8 pmd_align;
+	u32 val;
+	int err;
+
+	err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
+			  err);
+		return err;
+	}
+
+	pmd_align = (u8)val;
+
+	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+	clk_incval = ice_ptp_read_src_incval(hw);
+
+	/* Calculate TUs per second */
+	tu_per_sec = cur_freq * clk_incval;
+
+	/* The PMD alignment adjustment measurement depends on the link speed,
+	 * and whether FEC is enabled. For each link speed, the alignment
+	 * adjustment is calculated by dividing a value by the length of
+	 * a Time Unit in nanoseconds.
+	 *
+	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
+	 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
+	 * 10G w/FEC: align * 0.1 * 32/33
+	 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
+	 * 25G w/FEC: align * 0.4 * 32/33
+	 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
+	 * 40G w/FEC: align * 0.1 * 32/33
+	 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
+	 * 50G w/FEC: align * 0.8 * 32/33
+	 *
+	 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
+	 *
+	 * To allow for calculating this value using integer arithmetic, we
+	 * instead start with the number of TUs per second, (inverse of the
+	 * length of a Time Unit in nanoseconds), multiply by a value based
+	 * on the PMD alignment register, and then divide by the right value
+	 * calculated based on the table above. To avoid integer overflow,
+	 * this division is broken up into a step of dividing by 125 first.
+	 */
+	if (link_spd == ICE_PTP_LNK_SPD_1G) {
+		if (pmd_align == 4)
+			mult = 10;
+		else
+			mult = (pmd_align + 6) % 10;
+	} else if (link_spd == ICE_PTP_LNK_SPD_10G ||
+		   link_spd == ICE_PTP_LNK_SPD_25G ||
+		   link_spd == ICE_PTP_LNK_SPD_40G ||
+		   link_spd == ICE_PTP_LNK_SPD_50G) {
+		/* If Clause 74 FEC, always calculate PMD adjust */
+		if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
+			mult = pmd_align;
+		else
+			mult = 0;
+	} else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+		   link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+		   link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+		if (pmd_align < 17)
+			mult = pmd_align + 40;
+		else
+			mult = pmd_align;
+	} else {
+		ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
+			  link_spd);
+		mult = 0;
+	}
+
+	/* In some cases, there's no need to adjust for the PMD alignment */
+	if (!mult) {
+		*pmd_adj = 0;
+		return 0;
+	}
+
+	/* Calculate the adjustment by multiplying TUs per second by the
+	 * appropriate multiplier and divisor. To avoid overflow, we first
+	 * divide by 125, and then handle remaining divisor based on the link
+	 * speed pmd_adj_divisor value.
+	 */
+	adj = div_u64(tu_per_sec, 125);
+	adj *= mult;
+	adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+	/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
+	 * cycle count is necessary.
+	 */
+	if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
+		u64 cycle_adj;
+		u8 rx_cycle;
+
+		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+					    &val);
+		if (err) {
+			ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
+				  err);
+			return err;
+		}
+
+		rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
+		if (rx_cycle) {
+			mult = (4 - rx_cycle) * 40;
+
+			cycle_adj = div_u64(tu_per_sec, 125);
+			cycle_adj *= mult;
+			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+			adj += cycle_adj;
+		}
+	} else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
+		u64 cycle_adj;
+		u8 rx_cycle;
+
+		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+					    &val);
+		if (err) {
+			ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
+				  err);
+			return err;
+		}
+
+		rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
+		if (rx_cycle) {
+			mult = rx_cycle * 40;
+
+			cycle_adj = div_u64(tu_per_sec, 125);
+			cycle_adj *= mult;
+			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+			adj += cycle_adj;
+		}
+	}
+
+	/* Return the calculated adjustment */
+	*pmd_adj = adj;
+
+	return 0;
+}
+
+/**
+ * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
+ * @hw: pointer to HW struct
+ * @link_spd: the link speed to calculate for
+ *
+ * Determine the fixed Rx latency for a given link speed.
+ */
+static u64
+ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+	clk_incval = ice_ptp_read_src_incval(hw);
+
+	/* Calculate TUs per second */
+	tu_per_sec = cur_freq * clk_incval;
+
+	/* Calculate number of TUs to add for the fixed Rx latency. Since the
+	 * latency measurement is in 1/100th of a nanosecond, we need to
+	 * multiply by tu_per_sec and then divide by 1e11. This calculation
+	 * overflows 64 bit integer arithmetic, so break it up into two
+	 * divisions by 1e4 first then by 1e7.
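+	 *
+	 * Editorial example with assumed round numbers (not hardware values):
+	 * with tu_per_sec = 8e17 and rx_fixed_delay = 5000 (50.00 ns), the
+	 * direct product 8e17 * 5000 = 4e21 would overflow a u64 (max is
+	 * about 1.8e19), while the split calculation gives
+	 * div_u64(8e17, 1e4) = 8e13, times 5000 = 4e17, then
+	 * div_u64(4e17, 1e7) = 4e10 TUs, equal to 8e17 * 5000 / 1e11.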
+ */ + fixed_offset = div_u64(tu_per_sec, 10000); + fixed_offset *= e822_vernier[link_spd].rx_fixed_delay; + fixed_offset = div_u64(fixed_offset, 10000000); + + return fixed_offset; +} + +/** + * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset + * @hw: pointer to the HW struct + * @port: the PHY port to configure + * + * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to + * adjust Rx timestamps by. This combines calculations from the Vernier offset + * measurements taken in hardware with some data about known fixed delay as + * well as adjusting for multi-lane alignment delay. + * + * This function must be called only after the offset registers are valid, + * i.e. after the Vernier calibration wait has passed, to ensure that the PHY + * has measured the offset. + * + * To avoid overflow, when calculating the offset based on the known static + * latency values, we use measurements in 1/100th of a nanosecond, and divide + * the TUs per second up front. This avoids overflow while allowing + * calculation of the adjustment using integer arithmetic. + */ +static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port) +{ + enum ice_ptp_link_spd link_spd; + enum ice_ptp_fec_mode fec_mode; + u64 total_offset, pmd, val; + int err; + + err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + if (err) + return err; + + total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd); + + /* Read the first Vernier offset from the PHY register and add it to + * the total offset. + */ + err = ice_read_64b_phy_reg_e822(hw, port, + P_REG_PAR_PCS_RX_OFFSET_L, + &val); + if (err) + return err; + + total_offset += val; + + /* For Rx, all multi-lane link speeds include a second Vernier + * calibration, because the lanes might not be aligned. + */ + if (link_spd == ICE_PTP_LNK_SPD_40G || + link_spd == ICE_PTP_LNK_SPD_50G || + link_spd == ICE_PTP_LNK_SPD_50G_RS || + link_spd == ICE_PTP_LNK_SPD_100G_RS) { + err = ice_read_64b_phy_reg_e822(hw, port, + P_REG_PAR_RX_TIME_L, + &val); + if (err) + return err; + + total_offset += val; + } + + /* In addition, Rx must account for the PMD alignment */ + err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd); + if (err) + return err; + + /* For RS-FEC, this adjustment adds delay, but for other modes, it + * subtracts delay. + */ + if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC) + total_offset += pmd; + else + total_offset -= pmd; + + /* Now that the total offset has been calculated, program it to the + * PHY and indicate that the Rx offset is ready. After this, + * timestamps will be enabled. + */ + err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, + total_offset); + if (err) + return err; + + err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); + if (err) + return err; + + return 0; +} + +/** + * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode + * @hw: pointer to the HW struct + * @port: the PHY port to configure + * + * Calculate and program the fixed Rx offset, and indicate that the offset is + * ready. This can be used when operating in bypass mode. 
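+ *
+ * Editorial note: as with the Tx variant above, the trade-off of the fixed
+ * offset is that only the known static latency is compensated; the more
+ * precise Vernier measurements are folded in later by
+ * ice_phy_exit_bypass_e822().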
+ */ +static int +ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port) +{ + enum ice_ptp_link_spd link_spd; + enum ice_ptp_fec_mode fec_mode; + u64 total_offset; + int err; + + err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode); + if (err) + return err; + + total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd); + + /* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L + * register, then indicate that the Rx offset is ready. After this, + * timestamps will be enabled. + * + * Note that this skips including the more precise offsets generated + * by Vernier calibration. + */ + err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L, + total_offset); + if (err) + return err; + + err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1); + if (err) + return err; + + return 0; +} + +/** + * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @phy_time: on return, the 64bit PHY timer value + * @phc_time: on return, the lower 64bits of PHC time + * + * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC + * timer values. + */ +static int +ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time, + u64 *phc_time) +{ + u64 tx_time, rx_time; + u32 zo, lo; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + /* Prepare the PHC timer for a READ_TIME capture command */ + ice_ptp_src_cmd(hw, READ_TIME); + + /* Prepare the PHY timer for a READ_TIME capture command */ + err = ice_ptp_one_port_cmd(hw, port, READ_TIME); + if (err) + return err; + + /* Issue the sync to start the READ_TIME capture */ + ice_ptp_exec_tmr_cmd(hw); + + /* Read the captured PHC time from the shadow time registers */ + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + *phc_time = (u64)lo << 32 | zo; + + /* Read the captured PHY time from the PHY shadow registers */ + err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time); + if (err) + return err; + + /* If the PHY Tx and Rx timers don't match, log a warning message. + * Note that this should not happen in normal circumstances since the + * driver always programs them together. + */ + if (tx_time != rx_time) + dev_warn(ice_hw_to_dev(hw), + "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n", + port, (unsigned long long)tx_time, + (unsigned long long)rx_time); + + *phy_time = tx_time; + + return 0; +} + +/** + * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer + * @hw: pointer to the HW struct + * @port: the PHY port to synchronize + * + * Perform an adjustment to ensure that the PHY and PHC timers are in sync. + * This is done by issuing a READ_TIME command which triggers a simultaneous + * read of the PHY timer and PHC timer. Then we use the difference to + * calculate an appropriate 2s complement addition to add to the PHY timer in + * order to ensure it reads the same value as the primary PHC timer. + */ +static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port) +{ + u64 phc_time, phy_time, difference; + int err; + + if (!ice_ptp_lock(hw)) { + ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n"); + return -EBUSY; + } + + err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + /* Calculate the amount required to add to the port time in order for + * it to match the PHC time. 
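+	 * For illustration (editorial, values assumed): if phc_time = 1000
+	 * and phy_time = 1500, difference = (u64)(1000 - 1500) wraps to
+	 * 0xFFFFFFFFFFFFFE0C, which the port interprets as -500.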
+ * + * Note that the port adjustment is done using 2s complement + * arithmetic. This is convenient since it means that we can simply + * calculate the difference between the PHC time and the port time, + * and it will be interpreted correctly. + */ + difference = phc_time - phy_time; + + err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference); + if (err) + goto err_unlock; + + err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME); + if (err) + goto err_unlock; + + /* Issue the sync to activate the time adjustment */ + ice_ptp_exec_tmr_cmd(hw); + + /* Re-capture the timer values to flush the command registers and + * verify that the time was properly adjusted. + */ + err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + dev_info(ice_hw_to_dev(hw), + "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n", + port, (unsigned long long)phy_time, + (unsigned long long)phc_time); + + ice_ptp_unlock(hw); + + return 0; + +err_unlock: + ice_ptp_unlock(hw); + return err; +} + +/** + * ice_stop_phy_timer_e822 - Stop the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to stop + * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS + * + * Stop the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. + */ +int +ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) +{ + int err; + u32 val; + + err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0); + if (err) + return err; + + err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0); + if (err) + return err; + + err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + if (err) + return err; + + val &= ~P_REG_PS_START_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + val &= ~P_REG_PS_ENA_CLK_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + if (soft_reset) { + val |= P_REG_PS_SFT_RESET_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_start_phy_timer_e822 - Start the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to start + * @bypass: if true, start the PHY in bypass mode + * + * Start the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. + * + * Bypass mode enables timestamps immediately without waiting for Vernier + * calibration to complete. Hardware will still continue taking Vernier + * measurements on Tx or Rx of packets, but they will not be applied to + * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware + * has completed offset calculation. 
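+ *
+ * Editorial usage sketch (hypothetical caller, not part of the patch):
+ *
+ *	err = ice_start_phy_timer_e822(hw, port, true);
+ *	if (err)
+ *		return err;
+ *
+ * then, once traffic has flowed in both directions, fold in the measured
+ * offsets with ice_phy_exit_bypass_e822(hw, port).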
+ */ +int +ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass) +{ + u32 lo, hi, val; + u64 incval; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + err = ice_stop_phy_timer_e822(hw, port, false); + if (err) + return err; + + ice_phy_cfg_lane_e822(hw, port); + + err = ice_phy_cfg_uix_e822(hw, port); + if (err) + return err; + + err = ice_phy_cfg_parpcs_e822(hw, port); + if (err) + return err; + + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + incval = (u64)hi << 32 | lo; + + err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval); + if (err) + return err; + + err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL); + if (err) + return err; + + ice_ptp_exec_tmr_cmd(hw); + + err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); + if (err) + return err; + + val |= P_REG_PS_SFT_RESET_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + val |= P_REG_PS_START_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + val &= ~P_REG_PS_SFT_RESET_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL); + if (err) + return err; + + ice_ptp_exec_tmr_cmd(hw); + + val |= P_REG_PS_ENA_CLK_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + val |= P_REG_PS_LOAD_OFFSET_M; + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + ice_ptp_exec_tmr_cmd(hw); + + err = ice_sync_phy_timer_e822(hw, port); + if (err) + return err; + + if (bypass) { + val |= P_REG_PS_BYPASS_MODE_M; + /* Enter BYPASS mode, enabling timestamps immediately. */ + err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); + if (err) + return err; + + /* Program the fixed Tx offset */ + err = ice_phy_cfg_fixed_tx_offset_e822(hw, port); + if (err) + return err; + + /* Program the fixed Rx offset */ + err = ice_phy_cfg_fixed_rx_offset_e822(hw, port); + if (err) + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_phy_exit_bypass_e822 - Exit bypass mode, after vernier calculations + * @hw: pointer to the HW struct + * @port: the PHY port to configure + * + * After hardware finishes vernier calculations for the Tx and Rx offset, this + * function can be used to exit bypass mode by updating the total Tx and Rx + * offsets, and then disabling bypass. This will enable hardware to include + * the more precise offset calibrations, increasing precision of the generated + * timestamps. + * + * This cannot be done until hardware has measured the offsets, which requires + * waiting until at least one packet has been sent and received by the device. 
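+ *
+ * Editorial note: the function returns -EBUSY while either offset is still
+ * invalid, so callers may simply retry later rather than treating that
+ * return code as a hard failure.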
+ */
+int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
+{
+	int err;
+	u32 val;
+
+	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
+			  port, err);
+		return err;
+	}
+
+	if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
+		ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
+			  port);
+		return -EBUSY;
+	}
+
+	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
+			  port, err);
+		return err;
+	}
+
+	if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
+		ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
+			  port);
+		return -EBUSY;
+	}
+
+	err = ice_phy_cfg_tx_offset_e822(hw, port);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
+			  port, err);
+		return err;
+	}
+
+	err = ice_phy_cfg_rx_offset_e822(hw, port);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
+			  port, err);
+		return err;
+	}
+
+	/* Exit bypass mode now that the offset has been updated */
+	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n",
+			  port, err);
+		return err;
+	}
+
+	if (!(val & P_REG_PS_BYPASS_MODE_M))
+		ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
+			  port);
+
+	val &= ~P_REG_PS_BYPASS_MODE_M;
+	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n",
+			  port, err);
+		return err;
+	}
+
+	dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n",
+		 port);
+
+	return 0;
+}
+
 /* E810 functions
  *
  * The following functions operate on the E810 series devices which use
@@ -68,18 +2538,18 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
 static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
 {
 	struct ice_sbq_msg_input msg = {0};
-	int status;
+	int err;
 
 	msg.msg_addr_low = lower_16_bits(addr);
 	msg.msg_addr_high = upper_16_bits(addr);
 	msg.opcode = ice_sbq_msg_rd;
 	msg.dest_dev = rmn_0;
 
-	status = ice_sbq_rw_reg(hw, &msg);
-	if (status) {
-		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
-			  status);
-		return status;
+	err = ice_sbq_rw_reg(hw, &msg);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+			  err);
+		return err;
 	}
 
 	*val = msg.data;
@@ -98,7 +2568,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
 static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
 {
 	struct ice_sbq_msg_input msg = {0};
-	int status;
+	int err;
 
 	msg.msg_addr_low = lower_16_bits(addr);
 	msg.msg_addr_high = upper_16_bits(addr);
@@ -106,11 +2576,11 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
 	msg.dest_dev = rmn_0;
 	msg.data = val;
 
-	status = ice_sbq_rw_reg(hw, &msg);
-	if (status) {
-		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
-			  status);
-		return status;
+	err = ice_sbq_rw_reg(hw, &msg);
+	if (err) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+			  err);
+		return err;
 	}
 
 	return 0;
@@ -130,23 +2600,23 @@ static int
 ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
 {
 	u32 lo_addr, hi_addr, lo, hi;
-	int status;
+	int err;
 
 	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
 	hi_addr = 
TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); - status = ice_read_phy_reg_e810(hw, lo_addr, &lo); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n", - status); - return status; + err = ice_read_phy_reg_e810(hw, lo_addr, &lo); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", + err); + return err; } - status = ice_read_phy_reg_e810(hw, hi_addr, &hi); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n", - status); - return status; + err = ice_read_phy_reg_e810(hw, hi_addr, &hi); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", + err); + return err; } /* For E810 devices, the timestamp is reported with the lower 32 bits @@ -169,23 +2639,23 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) { u32 lo_addr, hi_addr; - int status; + int err; lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); - status = ice_write_phy_reg_e810(hw, lo_addr, 0); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, lo_addr, 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n", + err); + return err; } - status = ice_write_phy_reg_e810(hw, hi_addr, 0); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, hi_addr, 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n", + err); + return err; } return 0; @@ -200,17 +2670,32 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) */ int ice_ptp_init_phy_e810(struct ice_hw *hw) { - int status; u8 tmr_idx; + int err; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), - GLTSYN_ENA_TSYN_ENA_M); - if (status) + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), + GLTSYN_ENA_TSYN_ENA_M); + if (err) ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n", - status); + err); - return status; + return err; +} + +/** + * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform E810-specific PTP hardware clock initialization steps. 
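+ *
+ * Editorial note: on E810 this amounts to zeroing the synchronization delay
+ * and enabling the PHY timer; the device-agnostic ice_ptp_init_phc() below
+ * dispatches either here or to the E822-specific equivalent based on
+ * ice_is_e810().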
+ */ +static int ice_ptp_init_phc_e810(struct ice_hw *hw) +{ + /* Ensure synchronization delay is zero */ + wr32(hw, GLTSYN_SYNC_DLAY, 0); + + /* Initialize the PHY */ + return ice_ptp_init_phy_e810(hw); } /** @@ -227,22 +2712,22 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw) */ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) { - int status; u8 tmr_idx; + int err; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n", + err); + return err; } - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n", + err); + return err; } return 0; @@ -263,26 +2748,26 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) */ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) { - int status; u8 tmr_idx; + int err; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Adjustments are represented as signed 2's complement values in * nanoseconds. Sub-nanosecond adjustment is not supported. */ - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n", + err); + return err; } - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n", + err); + return err; } return 0; @@ -300,25 +2785,25 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) { u32 high, low; - int status; u8 tmr_idx; + int err; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; low = lower_32_bits(incval); high = upper_32_bits(incval); - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n", + err); + return err; } - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n", - status); - return status; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n", + err); + return err; } return 0; @@ -335,7 +2820,7 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) static int 
ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { u32 cmd_val, val; - int status; + int err; switch (cmd) { case INIT_TIME: @@ -356,20 +2841,20 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) } /* Read, modify, write */ - status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status); - return status; + err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err); + return err; } /* Modify necessary bits only and perform write */ val &= ~TS_CMD_MASK_E810; val |= cmd_val; - status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status); - return status; + err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err); + return err; } return 0; @@ -377,12 +2862,9 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) /* Device agnostic functions * - * The following functions implement useful behavior to hide the differences - * between E810 and other devices. They call the device-specific - * implementations where necessary. - * - * Currently, the driver only supports E810, but future work will enable - * support for E822-based devices. + * The following functions implement shared behavior common to both E822 and + * E810 devices, possibly calling a device specific implementation where + * necessary. */ /** @@ -433,42 +2915,6 @@ void ice_ptp_unlock(struct ice_hw *hw) } /** - * ice_ptp_src_cmd - Prepare source timer for a timer command - * @hw: pointer to HW structure - * @cmd: Timer command - * - * Prepare the source timer for an upcoming timer sync command. 
- */ -static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) -{ - u32 cmd_val; - u8 tmr_idx; - - tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_CPK_SRC; - - switch (cmd) { - case INIT_TIME: - cmd_val |= GLTSYN_CMD_INIT_TIME; - break; - case INIT_INCVAL: - cmd_val |= GLTSYN_CMD_INIT_INCVAL; - break; - case ADJ_TIME: - cmd_val |= GLTSYN_CMD_ADJ_TIME; - break; - case ADJ_TIME_AT_TIME: - cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; - break; - case READ_TIME: - cmd_val |= GLTSYN_CMD_READ_TIME; - break; - } - - wr32(hw, GLTSYN_CMD, cmd_val); -} - -/** * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command * @hw: pointer to HW struct * @cmd: the command to issue @@ -480,23 +2926,26 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { - int status; + int err; /* First, prepare the source timer */ ice_ptp_src_cmd(hw, cmd); /* Next, prepare the ports */ - status = ice_ptp_port_cmd_e810(hw, cmd); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n", - cmd, status); - return status; + if (ice_is_e810(hw)) + err = ice_ptp_port_cmd_e810(hw, cmd); + else + err = ice_ptp_port_cmd_e822(hw, cmd); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n", + cmd, err); + return err; } - /* Write the sync command register to drive both source and PHY timer commands - * synchronously + /* Write the sync command register to drive both source and PHY timer + * commands synchronously */ - wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); + ice_ptp_exec_tmr_cmd(hw); return 0; } @@ -516,8 +2965,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ int ice_ptp_init_time(struct ice_hw *hw, u64 time) { - int status; u8 tmr_idx; + int err; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -528,9 +2977,12 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); - if (status) - return status; + if (ice_is_e810(hw)) + err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); + else + err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF); + if (err) + return err; return ice_ptp_tmr_cmd(hw, INIT_TIME); } @@ -551,8 +3003,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) */ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) { - int status; u8 tmr_idx; + int err; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -560,9 +3012,12 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - status = ice_ptp_prep_phy_incval_e810(hw, incval); - if (status) - return status; + if (ice_is_e810(hw)) + err = ice_ptp_prep_phy_incval_e810(hw, incval); + else + err = ice_ptp_prep_phy_incval_e822(hw, incval); + if (err) + return err; return ice_ptp_tmr_cmd(hw, INIT_INCVAL); } @@ -576,16 +3031,16 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) */ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) { - int status; + int err; if (!ice_ptp_lock(hw)) return -EBUSY; - status = ice_ptp_write_incval(hw, incval); + err = ice_ptp_write_incval(hw, incval); ice_ptp_unlock(hw); - return status; + return err; } /** @@ -603,8 +3058,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) */ int 
ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) { - int status; u8 tmr_idx; + int err; tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; @@ -616,9 +3071,12 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - status = ice_ptp_prep_phy_adj_e810(hw, adj); - if (status) - return status; + if (ice_is_e810(hw)) + err = ice_ptp_prep_phy_adj_e810(hw, adj); + else + err = ice_ptp_prep_phy_adj_e822(hw, adj); + if (err) + return err; return ice_ptp_tmr_cmd(hw, ADJ_TIME); } @@ -630,11 +3088,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) * @idx: the timestamp index to read * @tstamp: on return, the 40bit timestamp value * - * Read a 40bit timestamp value out of the timestamp block. + * Read a 40bit timestamp value out of the timestamp block. For E822 devices, + * the block is the quad to read from. For E810 devices, the block is the + * logical port to read from. */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); + if (ice_is_e810(hw)) + return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); + else + return ice_read_phy_tstamp_e822(hw, block, idx, tstamp); } /** @@ -643,11 +3106,16 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) * @block: the block to read from * @idx: the timestamp index to reset * - * Clear a timestamp, resetting its valid bit, from the timestamp block. + * Clear a timestamp, resetting its valid bit, from the timestamp block. For + * E822 devices, the block is the quad to clear from. For E810 devices, the + * block is the logical port to clear from. */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - return ice_clear_phy_tstamp_e810(hw, block, idx); + if (ice_is_e810(hw)) + return ice_clear_phy_tstamp_e810(hw, block, idx); + else + return ice_clear_phy_tstamp_e822(hw, block, idx); } /* E810T SMA functions @@ -800,3 +3268,25 @@ bool ice_is_pca9575_present(struct ice_hw *hw) return !status && handle; } + +/** + * ice_ptp_init_phc - Initialize PTP hardware clock + * @hw: pointer to the HW struct + * + * Perform the steps required to initialize the PTP hardware clock. 
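+ *
+ * Editorial summary of the body below: enable the source clock for the
+ * owned timer index, read GLTSYN_STAT once to clear stale event err
+ * indications on the auxiliary pins, then hand off to the E810- or
+ * E822-specific initialization.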
+ */
+int ice_ptp_init_phc(struct ice_hw *hw)
+{
+	u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+	/* Enable source clocks */
+	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+	/* Clear event err indications for auxiliary pins */
+	(void)rd32(hw, GLTSYN_STAT(src_idx));
+
+	if (ice_is_e810(hw))
+		return ice_ptp_init_phc_e810(hw);
+	else
+		return ice_ptp_init_phc_e822(hw);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index b2984b5c22c1..519e75462e67 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -12,6 +12,112 @@ enum ice_ptp_tmr_cmd {
 	READ_TIME
 };
 
+enum ice_ptp_serdes {
+	ICE_PTP_SERDES_1G,
+	ICE_PTP_SERDES_10G,
+	ICE_PTP_SERDES_25G,
+	ICE_PTP_SERDES_40G,
+	ICE_PTP_SERDES_50G,
+	ICE_PTP_SERDES_100G
+};
+
+enum ice_ptp_link_spd {
+	ICE_PTP_LNK_SPD_1G,
+	ICE_PTP_LNK_SPD_10G,
+	ICE_PTP_LNK_SPD_25G,
+	ICE_PTP_LNK_SPD_25G_RS,
+	ICE_PTP_LNK_SPD_40G,
+	ICE_PTP_LNK_SPD_50G,
+	ICE_PTP_LNK_SPD_50G_RS,
+	ICE_PTP_LNK_SPD_100G_RS,
+	NUM_ICE_PTP_LNK_SPD /* Must be last */
+};
+
+enum ice_ptp_fec_mode {
+	ICE_PTP_FEC_MODE_NONE,
+	ICE_PTP_FEC_MODE_CLAUSE74,
+	ICE_PTP_FEC_MODE_RS_FEC
+};
+
+/**
+ * struct ice_time_ref_info_e822
+ * @pll_freq: Frequency of PLL that drives timer ticks in Hz
+ * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
+ * @pps_delay: propagation delay of the PPS output signal
+ *
+ * Characteristic information for the various TIME_REF sources possible in the
+ * E822 devices
+ */
+struct ice_time_ref_info_e822 {
+	u64 pll_freq;
+	u64 nominal_incval;
+	u8 pps_delay;
+};
+
+/**
+ * struct ice_vernier_info_e822
+ * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
+ * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
+ * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
+ * @rx_pcs_clk: Frequency used to calculate P_REG_PCS_RX_TUS
+ * @tx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_TX_TUS
+ * @rx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_RX_TUS
+ * @tx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_TX_TUS
+ * @rx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_RX_TUS
+ * @tx_fixed_delay: Fixed Tx latency measured in 1/100th nanoseconds
+ * @pmd_adj_divisor: Divisor used to calculate PMD alignment adjustment
+ * @rx_fixed_delay: Fixed Rx latency measured in 1/100th nanoseconds
+ *
+ * Table of constants used as part of the Vernier calibration of the Tx
+ * and Rx timestamps. This includes frequency values used to compute TUs per
+ * PAR/PCS clock cycle, and static delay values measured during hardware
+ * design.
+ *
+ * Note that some values are not used for all link speeds, and the
+ * P_REG_DESK_PAR* registers may represent different clock markers at
+ * different link speeds, either the deskew marker for multi-lane link speeds
+ * or the Reed Solomon gearbox marker for RS-FEC.
+ */ +struct ice_vernier_info_e822 { + u32 tx_par_clk; + u32 rx_par_clk; + u32 tx_pcs_clk; + u32 rx_pcs_clk; + u32 tx_desk_rsgb_par; + u32 rx_desk_rsgb_par; + u32 tx_desk_rsgb_pcs; + u32 rx_desk_rsgb_pcs; + u32 tx_fixed_delay; + u32 pmd_adj_divisor; + u32 rx_fixed_delay; +}; + +/** + * struct ice_cgu_pll_params_e822 + * @refclk_pre_div: Reference clock pre-divisor + * @feedback_div: Feedback divisor + * @frac_n_div: Fractional divisor + * @post_pll_div: Post PLL divisor + * + * Clock Generation Unit parameters used to program the PLL based on the + * selected TIME_REF frequency. + */ +struct ice_cgu_pll_params_e822 { + u32 refclk_pre_div; + u32 feedback_div; + u32 frac_n_div; + u32 post_pll_div; +}; + +extern const struct +ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; + +/* Table of constants related to possible TIME_REF sources */ +extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ]; + +/* Table of constants for Vernier calibration on E822 */ +extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD]; + /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. */ @@ -27,6 +133,59 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj); int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp); int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); +int ice_ptp_init_phc(struct ice_hw *hw); + +/* E822 family functions */ +int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val); +int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val); +int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); +int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val); +int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time); + +/** + * ice_e822_time_ref - Get the current TIME_REF from capabilities + * @hw: pointer to the HW structure + * + * Returns the current TIME_REF from the capabilities structure. + */ +static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw) +{ + return hw->func_caps.ts_func_info.time_ref; +} + +/** + * ice_set_e822_time_ref - Set new TIME_REF + * @hw: pointer to the HW structure + * @time_ref: new TIME_REF to set + * + * Update the TIME_REF in the capabilities structure in response to some + * change, such as an update to the CGU registers. 
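+ *
+ * Editorial usage sketch (hypothetical caller) combining the helpers below
+ * with the device-agnostic incval path declared above:
+ *
+ *	u64 incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ *	int err = ice_ptp_write_incval_locked(hw, incval);
+ *
+ * i.e. look up the nominal increment for the currently configured TIME_REF
+ * and program it to both the source timer and the PHY ports.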
+ */ +static inline void +ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) +{ + hw->func_caps.ts_func_info.time_ref = time_ref; +} + +static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref) +{ + return e822_time_ref[time_ref].pll_freq; +} + +static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref) +{ + return e822_time_ref[time_ref].nominal_incval; +} + +static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref) +{ + return e822_time_ref[time_ref].pps_delay; +} + +/* E822 Vernier calibration functions */ +int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset); +int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass); +int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port); /* E810 family functions */ int ice_ptp_init_phy_e810(struct ice_hw *hw); @@ -36,19 +195,194 @@ bool ice_is_pca9575_present(struct ice_hw *hw); #define PFTSYN_SEM_BYTES 4 +#define ICE_PTP_CLOCK_INDEX_0 0x00 +#define ICE_PTP_CLOCK_INDEX_1 0x01 + /* PHY timer commands */ #define SEL_CPK_SRC 8 +#define SEL_PHY_SRC 3 /* Time Sync command Definitions */ #define GLTSYN_CMD_INIT_TIME BIT(0) #define GLTSYN_CMD_INIT_INCVAL BIT(1) +#define GLTSYN_CMD_INIT_TIME_INCVAL (BIT(0) | BIT(1)) #define GLTSYN_CMD_ADJ_TIME BIT(2) #define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3)) #define GLTSYN_CMD_READ_TIME BIT(7) +/* PHY port Time Sync command definitions */ +#define PHY_CMD_INIT_TIME BIT(0) +#define PHY_CMD_INIT_INCVAL BIT(1) +#define PHY_CMD_ADJ_TIME (BIT(0) | BIT(1)) +#define PHY_CMD_ADJ_TIME_AT_TIME (BIT(0) | BIT(2)) +#define PHY_CMD_READ_TIME (BIT(0) | BIT(1) | BIT(2)) + #define TS_CMD_MASK_E810 0xFF +#define TS_CMD_MASK 0xF #define SYNC_EXEC_CMD 0x3 +/* Macros to derive port low and high addresses on both quads */ +#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF) +#define P_Q0_H(a, p) ((((a) + (0x2000 * (p)))) >> 16) +#define P_Q1_L(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) & 0xFFFF) +#define P_Q1_H(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) >> 16) + +/* PHY QUAD register base addresses */ +#define Q_0_BASE 0x94000 +#define Q_1_BASE 0x114000 + +/* Timestamp memory reset registers */ +#define Q_REG_TS_CTRL 0x618 +#define Q_REG_TS_CTRL_S 0 +#define Q_REG_TS_CTRL_M BIT(0) + +/* Timestamp availability status registers */ +#define Q_REG_TX_MEMORY_STATUS_L 0xCF0 +#define Q_REG_TX_MEMORY_STATUS_U 0xCF4 + +/* Tx FIFO status registers */ +#define Q_REG_FIFO23_STATUS 0xCF8 +#define Q_REG_FIFO01_STATUS 0xCFC +#define Q_REG_FIFO02_S 0 +#define Q_REG_FIFO02_M ICE_M(0x3FF, 0) +#define Q_REG_FIFO13_S 10 +#define Q_REG_FIFO13_M ICE_M(0x3FF, 10) + +/* Interrupt control Config registers */ +#define Q_REG_TX_MEM_GBL_CFG 0xC08 +#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0 +#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0) +#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1 +#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1) +#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9 +#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9) +#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15 +#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15) + +/* Tx Timestamp data registers */ +#define Q_REG_TX_MEMORY_BANK_START 0xA00 + +/* PHY port register base addresses */ +#define P_0_BASE 0x80000 +#define P_4_BASE 0x106000 + +/* Timestamp init registers */ +#define P_REG_RX_TIMER_INC_PRE_L 0x46C +#define P_REG_RX_TIMER_INC_PRE_U 0x470 +#define P_REG_TX_TIMER_INC_PRE_L 0x44C +#define P_REG_TX_TIMER_INC_PRE_U 0x450 + +/* Timestamp match and adjust target registers */ 
+#define P_REG_RX_TIMER_CNT_ADJ_L 0x474 +#define P_REG_RX_TIMER_CNT_ADJ_U 0x478 +#define P_REG_TX_TIMER_CNT_ADJ_L 0x454 +#define P_REG_TX_TIMER_CNT_ADJ_U 0x458 + +/* Timestamp capture registers */ +#define P_REG_RX_CAPTURE_L 0x4D8 +#define P_REG_RX_CAPTURE_U 0x4DC +#define P_REG_TX_CAPTURE_L 0x4B4 +#define P_REG_TX_CAPTURE_U 0x4B8 + +/* Timestamp PHY incval registers */ +#define P_REG_TIMETUS_L 0x410 +#define P_REG_TIMETUS_U 0x414 + +#define P_REG_40B_LOW_M 0xFF +#define P_REG_40B_HIGH_S 8 + +/* PHY window length registers */ +#define P_REG_WL 0x40C + +#define PTP_VERNIER_WL 0x111ed + +/* PHY start registers */ +#define P_REG_PS 0x408 +#define P_REG_PS_START_S 0 +#define P_REG_PS_START_M BIT(0) +#define P_REG_PS_BYPASS_MODE_S 1 +#define P_REG_PS_BYPASS_MODE_M BIT(1) +#define P_REG_PS_ENA_CLK_S 2 +#define P_REG_PS_ENA_CLK_M BIT(2) +#define P_REG_PS_LOAD_OFFSET_S 3 +#define P_REG_PS_LOAD_OFFSET_M BIT(3) +#define P_REG_PS_SFT_RESET_S 11 +#define P_REG_PS_SFT_RESET_M BIT(11) + +/* PHY offset valid registers */ +#define P_REG_TX_OV_STATUS 0x4D4 +#define P_REG_TX_OV_STATUS_OV_S 0 +#define P_REG_TX_OV_STATUS_OV_M BIT(0) +#define P_REG_RX_OV_STATUS 0x4F8 +#define P_REG_RX_OV_STATUS_OV_S 0 +#define P_REG_RX_OV_STATUS_OV_M BIT(0) + +/* PHY offset ready registers */ +#define P_REG_TX_OR 0x45C +#define P_REG_RX_OR 0x47C + +/* PHY total offset registers */ +#define P_REG_TOTAL_RX_OFFSET_L 0x460 +#define P_REG_TOTAL_RX_OFFSET_U 0x464 +#define P_REG_TOTAL_TX_OFFSET_L 0x440 +#define P_REG_TOTAL_TX_OFFSET_U 0x444 + +/* Timestamp PAR/PCS registers */ +#define P_REG_UIX66_10G_40G_L 0x480 +#define P_REG_UIX66_10G_40G_U 0x484 +#define P_REG_UIX66_25G_100G_L 0x488 +#define P_REG_UIX66_25G_100G_U 0x48C +#define P_REG_DESK_PAR_RX_TUS_L 0x490 +#define P_REG_DESK_PAR_RX_TUS_U 0x494 +#define P_REG_DESK_PAR_TX_TUS_L 0x498 +#define P_REG_DESK_PAR_TX_TUS_U 0x49C +#define P_REG_DESK_PCS_RX_TUS_L 0x4A0 +#define P_REG_DESK_PCS_RX_TUS_U 0x4A4 +#define P_REG_DESK_PCS_TX_TUS_L 0x4A8 +#define P_REG_DESK_PCS_TX_TUS_U 0x4AC +#define P_REG_PAR_RX_TUS_L 0x420 +#define P_REG_PAR_RX_TUS_U 0x424 +#define P_REG_PAR_TX_TUS_L 0x428 +#define P_REG_PAR_TX_TUS_U 0x42C +#define P_REG_PCS_RX_TUS_L 0x430 +#define P_REG_PCS_RX_TUS_U 0x434 +#define P_REG_PCS_TX_TUS_L 0x438 +#define P_REG_PCS_TX_TUS_U 0x43C +#define P_REG_PAR_RX_TIME_L 0x4F0 +#define P_REG_PAR_RX_TIME_U 0x4F4 +#define P_REG_PAR_TX_TIME_L 0x4CC +#define P_REG_PAR_TX_TIME_U 0x4D0 +#define P_REG_PAR_PCS_RX_OFFSET_L 0x4E8 +#define P_REG_PAR_PCS_RX_OFFSET_U 0x4EC +#define P_REG_PAR_PCS_TX_OFFSET_L 0x4C4 +#define P_REG_PAR_PCS_TX_OFFSET_U 0x4C8 +#define P_REG_LINK_SPEED 0x4FC +#define P_REG_LINK_SPEED_SERDES_S 0 +#define P_REG_LINK_SPEED_SERDES_M ICE_M(0x7, 0) +#define P_REG_LINK_SPEED_FEC_MODE_S 3 +#define P_REG_LINK_SPEED_FEC_MODE_M ICE_M(0x3, 3) +#define P_REG_LINK_SPEED_FEC_MODE(reg) \ + (((reg) & P_REG_LINK_SPEED_FEC_MODE_M) >> \ + P_REG_LINK_SPEED_FEC_MODE_S) + +/* PHY timestamp related registers */ +#define P_REG_PMD_ALIGNMENT 0x0FC +#define P_REG_RX_80_TO_160_CNT 0x6FC +#define P_REG_RX_80_TO_160_CNT_RXCYC_S 0 +#define P_REG_RX_80_TO_160_CNT_RXCYC_M BIT(0) +#define P_REG_RX_40_TO_160_CNT 0x8FC +#define P_REG_RX_40_TO_160_CNT_RXCYC_S 0 +#define P_REG_RX_40_TO_160_CNT_RXCYC_M ICE_M(0x3, 0) + +/* Rx FIFO status registers */ +#define P_REG_RX_OV_FS 0x4F8 +#define P_REG_RX_OV_FS_FIFO_STATUS_S 2 +#define P_REG_RX_OV_FS_FIFO_STATUS_M ICE_M(0x3FF, 2) + +/* Timestamp command registers */ +#define P_REG_TX_TMR_CMD 0x448 +#define P_REG_RX_TMR_CMD 0x468 + /* E810 timesync enable register 
*/ #define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) @@ -68,9 +402,20 @@ bool ice_is_pca9575_present(struct ice_hw *hw); /* Timestamp block macros */ #define TS_LOW_M 0xFFFFFFFF +#define TS_HIGH_M 0xFF #define TS_HIGH_S 32 +#define TS_PHY_LOW_M 0xFF +#define TS_PHY_HIGH_M 0xFFFFFFFF +#define TS_PHY_HIGH_S 8 + #define BYTES_PER_IDX_ADDR_L_U 8 +#define BYTES_PER_IDX_ADDR_L 4 + +/* Internal PHY timestamp address */ +#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U)) +#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \ + BYTES_PER_IDX_ADDR_L)) /* External PHY timestamp address */ #define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index af8e6ef5f571..dcc310e29300 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -244,6 +244,14 @@ static int ice_repr_add(struct ice_vf *vf) if (!repr) return -ENOMEM; +#ifdef CONFIG_ICE_SWITCHDEV + repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL); + if (!repr->mac_rule) { + err = -ENOMEM; + goto err_alloc_rule; + } +#endif + repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv)); if (!repr->netdev) { err = -ENOMEM; @@ -287,6 +295,11 @@ err_alloc_q_vector: free_netdev(repr->netdev); repr->netdev = NULL; err_alloc: +#ifdef CONFIG_ICE_SWITCHDEV + kfree(repr->mac_rule); + repr->mac_rule = NULL; +err_alloc_rule: +#endif kfree(repr); vf->repr = NULL; return err; @@ -304,6 +317,10 @@ static void ice_repr_rem(struct ice_vf *vf) unregister_netdev(vf->repr->netdev); free_netdev(vf->repr->netdev); vf->repr->netdev = NULL; +#ifdef CONFIG_ICE_SWITCHDEV + kfree(vf->repr->mac_rule); + vf->repr->mac_rule = NULL; +#endif kfree(vf->repr); vf->repr = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index 806de22933c6..0c77ff050d15 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -13,6 +13,11 @@ struct ice_repr { struct ice_q_vector *q_vector; struct net_device *netdev; struct metadata_dst *dst; +#ifdef CONFIG_ICE_SWITCHDEV + /* info about slow path MAC rule */ + struct ice_rule_query_data *mac_rule; + u8 rule_added; +#endif }; int ice_repr_add_for_all_vfs(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index ce3c7bded4cb..7947223536e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -11,7 +11,7 @@ * This function inserts the root node of the scheduling tree topology * to the SW DB. 
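The ice_sched.c and ice_sriov.c hunks that follow apply one mechanical conversion: functions that returned the driver-private enum ice_status now return an int carrying a standard errno. Collected from the hunks in this diff as a reader aid (the summary itself is not part of the patch):

/* Status-to-errno substitutions observed below:
 *	ICE_ERR_PARAM          -> -EINVAL
 *	ICE_ERR_NO_MEMORY      -> -ENOMEM
 *	ICE_ERR_CFG            -> -EIO
 *	ICE_ERR_IN_USE         -> -EBUSY
 *	ICE_ERR_MAX_LIMIT      -> -ENOSPC
 *	ICE_ERR_AQ_FULL        -> -ENOSPC
 *	ICE_ERR_DOES_NOT_EXIST -> -ENOENT
 *	ICE_ERR_OUT_OF_RANGE   -> -EIO
 *	ICE_ERR_INVAL_SIZE     -> -EINVAL
 *	ICE_ERR_BAD_PTR        -> -EINVAL
 */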
 */ -static enum ice_status +static int ice_sched_add_root_node(struct ice_port_info *pi, struct ice_aqc_txsched_elem_data *info) { @@ -19,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi, struct ice_hw *hw; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL); if (!root) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* coverity[suspicious_sizeof] */ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], sizeof(*root), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } memcpy(&root->info, info, sizeof(*info)); @@ -96,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) * * This function sends a scheduling elements cmd (cmd_opc) */ -static enum ice_status +static int ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, u16 elems_req, void *buf, u16 buf_size, u16 *elems_resp, struct ice_sq_cd *cd) { struct ice_aqc_sched_elem_cmd *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.sched_elem_cmd; ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); @@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, * * Query scheduling elements (0x0404) */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) @@ -145,18 +145,18 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, * * This function inserts a scheduler node to the SW DB. */ -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; struct ice_sched_node *node; - enum ice_status status; struct ice_hw *hw; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; @@ -166,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, if (!parent) { ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", le32_to_cpu(info->parent_teid)); - return ICE_ERR_PARAM; + return -EINVAL; } /* query the current node information from FW before adding it @@ -178,7 +178,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); if (!node) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; if (hw->max_children[layer]) { /* coverity[suspicious_sizeof] */ node->children = devm_kcalloc(ice_hw_to_dev(hw), @@ -186,7 +186,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, sizeof(*node), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } } @@ -209,7 +209,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, * * Delete scheduling elements (0x040F) */ -static enum ice_status +static int ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_delete_elem *buf, u16 buf_size, u16 *grps_del, struct ice_sq_cd *cd) @@ -228,19 +228,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, * * This function removes nodes from HW */ -static enum ice_status +static int ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, u16 num_nodes, u32 *node_teids) { struct ice_aqc_delete_elem *buf; u16 i, num_groups_removed = 0; - enum ice_status status; u16 buf_size; + int status; buf_size = 
struct_size(buf, teid, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -369,14 +369,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) * * Get default scheduler topology (0x400) */ -static enum ice_status +static int ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, struct ice_aqc_get_topo_elem *buf, u16 buf_size, u8 *num_branches, struct ice_sq_cd *cd) { struct ice_aqc_get_topo *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.get_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); @@ -399,7 +399,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, * * Add scheduling elements (0x0401) */ -static enum ice_status +static int ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_add_elem *buf, u16 buf_size, u16 *grps_added, struct ice_sq_cd *cd) @@ -420,7 +420,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, * * Configure scheduling elements (0x0403) */ -static enum ice_status +static int ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_cfgd, struct ice_sq_cd *cd) @@ -441,7 +441,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, * * Move scheduling elements (0x0408) */ -static enum ice_status +static int ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_move_elem *buf, u16 buf_size, u16 *grps_movd, struct ice_sq_cd *cd) @@ -462,7 +462,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, * * Suspend scheduling elements (0x0409) */ -static enum ice_status +static int ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -482,7 +482,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * resume scheduling elements (0x040A) */ -static enum ice_status +static int ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { @@ -500,7 +500,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, * * Query scheduler resource allocation (0x0412) */ -static enum ice_status +static int ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, struct ice_aqc_query_txsched_res_resp *buf, struct ice_sq_cd *cd) @@ -520,18 +520,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, * * This function suspends or resumes HW nodes */ -static enum ice_status +static int ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { u16 i, buf_size, num_elem_ret = 0; - enum ice_status status; __le32 *buf; + int status; buf_size = sizeof(*buf) * num_nodes; buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_nodes; i++) buf[i] = cpu_to_le32(node_teids[i]); @@ -558,7 +558,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -566,7 +566,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) 
- return ICE_ERR_PARAM; + return -EINVAL; /* allocate LAN queue contexts */ if (!vsi_ctx->lan_q_ctx[tc]) { vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -574,7 +574,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->lan_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_lan_q_entries[tc] = new_numqs; return 0; } @@ -585,7 +585,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); @@ -602,7 +602,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * @tc: TC number * @new_numqs: number of queues */ -static enum ice_status +static int ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; @@ -610,7 +610,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; /* allocate RDMA queue contexts */ if (!vsi_ctx->rdma_q_ctx[tc]) { vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), @@ -618,7 +618,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) sizeof(*q_ctx), GFP_KERNEL); if (!vsi_ctx->rdma_q_ctx[tc]) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; vsi_ctx->num_rdma_q_entries[tc] = new_numqs; return 0; } @@ -629,7 +629,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, sizeof(*q_ctx), GFP_KERNEL); if (!q_ctx) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], prev_num * sizeof(*q_ctx)); devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]); @@ -651,14 +651,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) * * RL profile function to add, query, or remove profile(s) */ -static enum ice_status +static int ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) { struct ice_aqc_rl_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.rl_profile; @@ -682,7 +682,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, * * Add RL profile (0x0410) */ -static enum ice_status +static int ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_added, struct ice_sq_cd *cd) @@ -702,7 +702,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, * * Remove RL profile (0x0415) */ -static enum ice_status +static int ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_removed, struct ice_sq_cd *cd) @@ -721,24 +721,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, * its associated parameters from HW DB, and locally. The caller needs to * hold scheduler lock. 
 */ -static enum ice_status +static int ice_sched_del_rl_profile(struct ice_hw *hw, struct ice_aqc_rl_profile_info *rl_info) { struct ice_aqc_rl_profile_elem *buf; u16 num_profiles_removed; - enum ice_status status; u16 num_profiles = 1; + int status; if (rl_info->prof_id_ref != 0) - return ICE_ERR_IN_USE; + return -EBUSY; /* Safe to remove profile ID */ buf = &rl_info->profile; status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), &num_profiles_removed, NULL); if (status || num_profiles_removed != num_profiles) - return ICE_ERR_CFG; + return -EIO; /* Delete stale entry now */ list_del(&rl_info->list_entry); @@ -763,7 +763,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, &pi->rl_prof_list[ln], list_entry) { struct ice_hw *hw = pi->hw; - enum ice_status status; + int status; rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); @@ -875,7 +875,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw) * * This function adds nodes to HW as well as to SW DB for a given layer */ -static enum ice_status +static int ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, u16 num_nodes, u16 *num_nodes_added, u32 *first_node_teid) @@ -883,15 +883,15 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *prev, *new_node; struct ice_aqc_add_elem *buf; u16 i, num_groups_added = 0; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; size_t buf_size; + int status = 0; u32 teid; buf_size = struct_size(buf, generic, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); @@ -918,7 +918,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", hw->adminq.sq_last_status); devm_kfree(ice_hw_to_dev(hw), buf); - return ICE_ERR_CFG; + return -EIO; } *num_nodes_added = num_nodes; @@ -974,7 +974,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, * * Add nodes into specific HW layer. */ -static enum ice_status +static int ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -989,7 +989,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, return 0; if (!parent || layer < pi->hw->sw_entry_point_layer) - return ICE_ERR_PARAM; + return -EINVAL; /* max children per node per layer */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -998,8 +998,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, if ((parent->num_children + num_nodes) > max_child_nodes) { /* Fail if the parent is a TC node */ if (parent == tc_node) - return ICE_ERR_CFG; - return ICE_ERR_MAX_LIMIT; + return -EIO; + return -ENOSPC; } return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, @@ -1018,7 +1018,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, * * This function adds nodes to a given layer. 
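The add-to-layer loop in the next hunk treats -ENOSPC (the new spelling of ICE_ERR_MAX_LIMIT, meaning the parent ran out of child slots) as retriable against a fresh parent, and any other error as fatal. A condensed, self-contained sketch of that policy (hypothetical helper, not the driver code):

/* -ENOSPC: parent full, try the next candidate parent.
 * 0: success. Anything else: hard failure, stop. */
static int example_add_with_retry(int (*try_add)(void *parent),
				  void *parents[], int n_parents)
{
	int i, status = -ENOSPC;

	for (i = 0; i < n_parents; i++) {
		status = try_add(parents[i]);
		if (status != -ENOSPC)
			break;
	}
	return status;
}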
*/ -static enum ice_status +static int ice_sched_add_nodes_to_layer(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, @@ -1027,7 +1027,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, { u32 *first_teid_ptr = first_node_teid; u16 new_num_nodes = num_nodes; - enum ice_status status = 0; + int status = 0; *num_nodes_added = 0; while (*num_nodes_added < num_nodes) { @@ -1045,14 +1045,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, if (*num_nodes_added > num_nodes) { ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, *num_nodes_added); - status = ICE_ERR_CFG; + status = -EIO; break; } /* break if all the nodes are added successfully */ if (!status && (*num_nodes_added == num_nodes)) break; /* break if the error is not max limit */ - if (status && status != ICE_ERR_MAX_LIMIT) + if (status && status != -ENOSPC) break; /* Exceeded the max children */ max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; @@ -1152,7 +1152,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) } if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - enum ice_status status; + int status; /* remove the default leaf node */ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); @@ -1198,23 +1198,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) * resources, default topology created by firmware and storing the information * in SW DB. */ -enum ice_status ice_sched_init_port(struct ice_port_info *pi) +int ice_sched_init_port(struct ice_port_info *pi) { struct ice_aqc_get_topo_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 num_branches; u16 num_elems; + int status; u8 i, j; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; /* Query the Default Topology from FW */ buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Query default scheduling tree topology */ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, @@ -1226,7 +1226,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", num_branches); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1237,7 +1237,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", num_elems); - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_init_port; } @@ -1300,11 +1300,11 @@ err_init_port: * * query FW for allocated scheduler resources and store in HW struct */ -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) +int ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; - enum ice_status status = 0; __le16 max_sibl; + int status = 0; u16 i; if (hw->layer_info) @@ -1312,7 +1312,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); if (status) @@ -1341,7 +1341,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) sizeof(*hw->layer_info)), GFP_KERNEL); if (!hw->layer_info) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto 
sched_query_out; } @@ -1614,31 +1614,31 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * This function adds the VSI child nodes to tree. It gets called for * LAN and RDMA separately. */ -static enum ice_status +static int ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) { struct ice_sched_node *parent, *node; struct ice_hw *hw = pi->hw; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; + int status; qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1717,18 +1717,18 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, * This function adds the VSI supported nodes into Tx tree including the * VSI, its parent and intermediate nodes in below layers */ -static enum ice_status +static int ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, vsil; + int status; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { @@ -1737,7 +1737,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, &first_node_teid, &num_added); if (status || num_nodes[i] != num_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -1749,7 +1749,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (i == vsil) parent->vsi_handle = vsi_handle; @@ -1766,7 +1766,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, * * This function adds a new VSI into scheduler tree */ -static enum ice_status +static int ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; @@ -1774,7 +1774,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; /* calculate number of supported nodes needed for this VSI */ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); @@ -1794,7 +1794,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) * * This function updates the VSI child nodes based on the number of queues */ -static enum ice_status +static int ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { @@ -1802,21 +1802,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; u16 prev_numqs; + int status = 0; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; vsi_node = ice_sched_get_vsi_node(pi, tc_node, 
vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; if (owner == ICE_SCHED_NODE_OWNER_LAN) prev_numqs = vsi_ctx->sched.max_lanq[tc]; @@ -1869,22 +1869,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, * enabled and VSI is in suspended state then resume the VSI back. If TC is * disabled then suspend the VSI if it is not already. */ -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; struct ice_vsi_ctx *vsi_ctx; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) - return ICE_ERR_PARAM; + return -EINVAL; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); /* suspend the VSI if TC is not enabled */ @@ -1908,7 +1908,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_CFG; + return -EIO; vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_node->in_use = true; @@ -1993,11 +1993,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) * This function removes the VSI and its LAN or RDMA children nodes from the * scheduler tree. */ -static enum ice_status +static int ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { - enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; + int status = -EINVAL; u8 i; ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); @@ -2022,7 +2022,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (ice_sched_is_leaf_node_present(vsi_node)) { ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); - status = ICE_ERR_IN_USE; + status = -EBUSY; goto exit_sched_rm_vsi_cfg; } while (j < vsi_node->num_children) { @@ -2065,7 +2065,7 @@ exit_sched_rm_vsi_cfg: * This function clears the VSI and its LAN children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); } @@ -2078,7 +2078,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) * This function clears the VSI and its RDMA children nodes from scheduler tree * for all TCs. */ -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); } @@ -2188,36 +2188,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent, * * This function moves the child nodes to a given parent. 
*/ -static enum ice_status +static int ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { struct ice_aqc_move_elem *buf; struct ice_sched_node *node; - enum ice_status status = 0; u16 i, grps_movd = 0; struct ice_hw *hw; + int status = 0; u16 buf_len; hw = pi->hw; if (!parent || !num_items) - return ICE_ERR_PARAM; + return -EINVAL; /* Does parent have enough space */ if (parent->num_children + num_items > hw->max_children[parent->tx_sched_layer]) - return ICE_ERR_AQ_FULL; + return -ENOSPC; buf_len = struct_size(buf, teid, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_items; i++) { node = ice_sched_find_node_by_teid(pi->root, list[i]); if (!node) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto move_err_exit; } @@ -2228,7 +2228,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, &grps_movd, NULL); if (status && grps_movd != 1) { - status = ICE_ERR_CFG; + status = -EIO; goto move_err_exit; } @@ -2251,28 +2251,28 @@ move_err_exit: * This function moves a VSI to an aggregator node or its subtree. * Intermediate nodes may be created if required. */ -static enum ice_status +static int ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, u8 tc) { struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u32 first_node_teid, vsi_teid; - enum ice_status status; u16 num_nodes_added; u8 aggl, vsil, i; + int status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Is this VSI already part of given aggregator? */ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) @@ -2302,7 +2302,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2314,7 +2314,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, parent = parent->children[0]; if (!parent) - return ICE_ERR_CFG; + return -EIO; } move_nodes: @@ -2333,14 +2333,14 @@ move_nodes: * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The * caller holds the scheduler lock. 
*/ -static enum ice_status +static int ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *tmp; - enum ice_status status = 0; + int status = 0; list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list, list_entry) { @@ -2397,7 +2397,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) * This function removes the aggregator node and intermediate nodes if any * from the given TC */ -static enum ice_status +static int ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *tc_node, *agg_node; @@ -2405,15 +2405,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; /* Can't remove the aggregator node if it has children */ if (ice_sched_is_agg_inuse(pi, agg_node)) - return ICE_ERR_IN_USE; + return -EBUSY; /* need to remove the whole subtree if aggregator node is the * only child. @@ -2422,7 +2422,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) struct ice_sched_node *parent = agg_node->parent; if (!parent) - return ICE_ERR_CFG; + return -EIO; if (parent->num_children > 1) break; @@ -2445,11 +2445,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * the aggregator configuration completely for requested TC. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { - enum ice_status status = 0; + int status = 0; /* If nothing to remove - return success */ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) @@ -2478,7 +2478,7 @@ exit_rm_agg_cfg_tc: * Save aggregator TC bitmap. This function needs to be called with scheduler * lock held. */ -static enum ice_status +static int ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, unsigned long *tc_bitmap) { @@ -2486,7 +2486,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2501,20 +2501,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, * This function creates an aggregator node and intermediate nodes if required * for the given TC */ -static enum ice_status +static int ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *parent, *agg_node, *tc_node; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; u32 first_node_teid; u16 num_nodes_added; + int status = 0; u8 i, aggl; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) - return ICE_ERR_CFG; + return -EIO; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); /* Does Agg node already exist ? 
 */ @@ -2549,14 +2549,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) parent = tc_node; for (i = hw->sw_entry_point_layer; i <= aggl; i++) { if (!parent) - return ICE_ERR_CFG; + return -EIO; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_nodes_added); if (status || num_nodes[i] != num_nodes_added) - return ICE_ERR_CFG; + return -EIO; /* The newly added node can be a new parent for the next * layer nodes @@ -2591,13 +2591,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) * resources and remove aggregator ID. * This function needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, unsigned long *tc_bitmap) { struct ice_sched_agg_info *agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; agg_info = ice_get_agg_info(hw, agg_id); @@ -2606,7 +2606,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info), GFP_KERNEL); if (!agg_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; agg_info->agg_id = agg_id; agg_info->agg_type = agg_type; @@ -2653,19 +2653,17 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, * * This function configures aggregator node(s). */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); - status = ice_sched_cfg_agg(pi, agg_id, agg_type, - (unsigned long *)&bitmap); + status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap); if (!status) - status = ice_save_agg_tc_bitmap(pi, agg_id, - (unsigned long *)&bitmap); + status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap); mutex_unlock(&pi->sched_lock); return status; } @@ -2724,7 +2722,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) * Save VSI to aggregator TC bitmap. This function needs to be called with scheduler * lock held. */ -static enum ice_status +static int ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { @@ -2733,11 +2731,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* check if entry already exists */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return 0; @@ -2754,21 +2752,21 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, * already associated to the aggregator node then no operation is performed on * the tree. This function needs to be called with scheduler lock held. 
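With the casts gone, both public aggregator wrappers take a plain u8 TC bitmap and widen it to an on-stack unsigned long before the bitmap helpers see it. A hedged usage sketch (aggregator ID and VSI handle are hypothetical; ICE_AGG_TYPE_AGG is assumed from the driver's ice_agg_type enum):

	/* Configure aggregator 10 to cover TC0 and TC1, then attach a VSI */
	err = ice_cfg_agg(pi, 10, ICE_AGG_TYPE_AGG, BIT(0) | BIT(1));
	if (!err)
		err = ice_move_vsi_to_agg(pi, 10, vsi_handle, BIT(0) | BIT(1));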
*/ -static enum ice_status +static int ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, unsigned long *tc_bitmap) { struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL; struct ice_sched_agg_info *agg_info, *old_agg_info; - enum ice_status status = 0; struct ice_hw *hw = pi->hw; + int status = 0; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_agg_info(hw, agg_id); if (!agg_info) - return ICE_ERR_PARAM; + return -EINVAL; /* If the VSI is already part of another aggregator then update * its VSI info list */ @@ -2790,7 +2788,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_vsi_info), GFP_KERNEL); if (!agg_vsi_info) - return ICE_ERR_PARAM; + return -EINVAL; /* add VSI ID into the aggregator list */ agg_vsi_info->vsi_handle = vsi_handle; @@ -2851,14 +2849,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) * returns success or error on config sched element failure. The caller * needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data buf; - enum ice_status status; u16 elem_cfgd = 0; u16 num_elems = 1; + int status; buf = *info; /* Parent TEID is reserved field in this aq call */ @@ -2874,7 +2872,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, &elem_cfgd, NULL); if (status || elem_cfgd != num_elems) { ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); - return ICE_ERR_CFG; + return -EIO; } /* Config success case */ @@ -2893,7 +2891,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, * * This function configures node element's BW allocation. */ -static enum ice_status +static int ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 bw_alloc) { @@ -2909,7 +2907,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); } else { - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -2925,12 +2923,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, * * Move or associate VSI to a new or default aggregator node. */ -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap) { unsigned long bitmap = tc_bitmap; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, @@ -3098,12 +3096,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) * * This function converts the BW to profile structure format. 
*/ -static enum ice_status +static int ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, struct ice_aqc_rl_profile_elem *profile) { - enum ice_status status = ICE_ERR_PARAM; s64 bytes_per_sec, ts_rate, mv_tmp; + int status = -EINVAL; bool found = false; s32 encode = 0; s64 mv = 0; @@ -3150,7 +3148,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, profile->rl_encode = cpu_to_le16(encode); status = 0; } else { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; } return status; @@ -3176,9 +3174,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, struct ice_aqc_rl_profile_info *rl_prof_elem; u16 profiles_added = 0, num_profiles = 1; struct ice_aqc_rl_profile_elem *buf; - enum ice_status status; struct ice_hw *hw; u8 profile_type; + int status; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) return NULL; @@ -3249,7 +3247,7 @@ exit_add_rl_prof: * * This function configures node element's BW limit. */ -static enum ice_status +static int ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 rl_prof_id) { @@ -3268,7 +3266,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, * hence only one of them may be set for any given element */ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) - return ICE_ERR_CFG; + return -EIO; data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); break; @@ -3291,7 +3289,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && (le16_to_cpu(data->eir_bw.bw_profile_idx) != ICE_SCHED_DFLT_RL_PROF_ID)) - return ICE_ERR_CFG; + return -EIO; /* EIR BW is set to default, disable it */ data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; /* Okay to enable shared BW now */ @@ -3300,7 +3298,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, break; default: /* Unknown rate limit type */ - return ICE_ERR_PARAM; + return -EINVAL; } /* Configure element */ @@ -3420,15 +3418,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold * scheduler lock. */ -static enum ice_status +static int ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, u16 profile_id) { struct ice_aqc_rl_profile_info *rl_prof_elem; - enum ice_status status = 0; + int status = 0; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) - return ICE_ERR_PARAM; + return -EINVAL; /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) @@ -3441,11 +3439,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, /* Remove old profile ID from database */ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); - if (status && status != ICE_ERR_IN_USE) + if (status && status != -EBUSY) ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); break; } - if (status == ICE_ERR_IN_USE) + if (status == -EBUSY) status = 0; return status; } @@ -3461,16 +3459,16 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. 
*/ -static enum ice_status +static int ice_sched_set_node_bw_dflt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u8 layer_num) { - enum ice_status status; struct ice_hw *hw; u8 profile_type; u16 rl_prof_id; u16 old_id; + int status; hw = pi->hw; switch (rl_type) { @@ -3488,7 +3486,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* Save existing RL prof ID for later clean up */ old_id = ice_sched_get_node_rl_prof_id(node, rl_type); @@ -3518,7 +3516,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi, * them may be set for any given element. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_eir_srl_excl(struct ice_port_info *pi, struct ice_sched_node *node, u8 layer_num, enum ice_rl_type rl_type, u32 bw) @@ -3562,14 +3560,14 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi, * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile * ID from local database. The caller needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { struct ice_aqc_rl_profile_info *rl_prof_info; - enum ice_status status = ICE_ERR_PARAM; struct ice_hw *hw = pi->hw; u16 old_id, rl_prof_id; + int status = -EINVAL; rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); if (!rl_prof_info) @@ -3608,31 +3606,31 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, * It updates node's BW limit parameters like BW RL profile ID of type CIR, * EIR, or SRL. The caller needs to hold scheduler lock. */ -static enum ice_status +static int ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw) { struct ice_sched_node *cfg_node = node; - enum ice_status status; + int status; struct ice_hw *hw; u8 layer_num; if (!pi) - return ICE_ERR_PARAM; + return -EINVAL; hw = pi->hw; /* Remove unused RL profile IDs from HW and SW DB */ ice_sched_rm_unused_rl_prof(pi); layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (layer_num >= hw->num_tx_sched_layers) - return ICE_ERR_PARAM; + return -EINVAL; if (rl_type == ICE_SHARED_BW) { /* SRL node may be different */ cfg_node = ice_sched_get_srl_node(node, layer_num); if (!cfg_node) - return ICE_ERR_CFG; + return -EIO; } /* EIR BW and Shared BW profiles are mutually exclusive and * hence only one of them may be set for any given element @@ -3657,7 +3655,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ -static enum ice_status +static int ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type) @@ -3675,7 +3673,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, * behalf of the requested node (first argument). This function needs to be * called with scheduler lock held. */ -static enum ice_status +static int ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) { /* SRL profiles are not available on all layers. 
Check if the @@ -3690,7 +3688,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) (node->parent && node->parent->num_children == 1))) return 0; - return ICE_ERR_CFG; + return -EIO; } /** @@ -3701,7 +3699,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) * * Save BW information of queue type node for post replay use. */ -static enum ice_status +static int ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) { switch (rl_type) { @@ -3715,7 +3713,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } return 0; } @@ -3731,16 +3729,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) * * This function sets BW limit of queue scheduling node. */ -static enum ice_status +static int ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; struct ice_q_ctx *q_ctx; + int status = -EINVAL; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; mutex_lock(&pi->sched_lock); q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); if (!q_ctx) @@ -3762,7 +3760,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (sel_layer >= pi->hw->num_tx_sched_layers) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit_q_bw_lmt; } status = ice_sched_validate_srl_node(node, sel_layer); @@ -3794,7 +3792,7 @@ exit_q_bw_lmt: * * This function configures BW limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { @@ -3812,7 +3810,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * * This function configures BW default limit of queue scheduling node. */ -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type) { @@ -3880,13 +3878,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, * This function sets BW limit of VSI or Aggregator scheduling node * based on TC information from passed in argument BW. */ -static enum ice_status +int ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw) { - enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; + int status = -EINVAL; if (!pi) return status; @@ -3921,7 +3919,7 @@ exit_set_node_bw_lmt_per_tc: * This function configures BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { @@ -3948,7 +3946,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * This function configures default BW limit of VSI scheduling node based on TC * information. */ -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type) { @@ -3976,13 +3974,13 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, * burst size value is used for future rate limit calls. 
It doesn't change the * existing or previously created RL profiles. */ -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) { u16 burst_size_to_prog; if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || bytes > ICE_MAX_BURST_SIZE_ALLOWED) - return ICE_ERR_PARAM; + return -EINVAL; if (ice_round_to_num(bytes, 64) <= ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { /* 64 byte granularity case */ @@ -4017,13 +4015,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) * This function configures node element's priority value. It * needs to be called with scheduler lock held. */ -static enum ice_status +static int ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; - enum ice_status status; + int status; buf = node->info; data = &buf.data; @@ -4044,12 +4042,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, * This function restores node's BW from bw_t_info. The caller needs * to hold the scheduler lock. */ -static enum ice_status +static int ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, struct ice_bw_type_info *bw_t_info) { struct ice_port_info *pi = hw->port_info; - enum ice_status status = ICE_ERR_PARAM; + int status = -EINVAL; u16 bw_alloc; if (!node) @@ -4137,7 +4135,7 @@ void ice_sched_replay_agg(struct ice_hw *hw) if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); ice_sched_get_ena_tc_bitmap(pi, @@ -4191,18 +4189,17 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) * their node bandwidth information. This function needs to be called with * scheduler lock held. */ -static enum ice_status -ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; - enum ice_status status; + int status; bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; agg_info = ice_get_vsi_agg_info(hw, vsi_handle); if (!agg_info) return 0; /* Not present in list - default Agg case */ @@ -4233,10 +4230,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays association of VSI to aggregator type nodes, and * node bandwidth information. */ -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { struct ice_port_info *pi = hw->port_info; - enum ice_status status; + int status; mutex_lock(&pi->sched_lock); status = ice_sched_replay_vsi_agg(hw, vsi_handle); @@ -4252,14 +4249,13 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) * This function replays queue type node bandwidth. This function needs to be * called with scheduler lock held. 
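A note on ice_cfg_rl_burst_size() from earlier in this hunk: out-of-range requests now fail with -EINVAL, and accepted values are rounded to a supported granularity (the 64-byte case is visible above) before being stored for later rate-limit calls. A hedged usage sketch (the byte count is arbitrary and assumed to fall inside the allowed window):

	/* Set the burst ceiling consulted by subsequent RL profile adds;
	 * values outside [ICE_MIN_BURST_SIZE_ALLOWED,
	 * ICE_MAX_BURST_SIZE_ALLOWED] are rejected with -EINVAL. */
	err = ice_cfg_rl_burst_size(hw, 4096);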
*/ -enum ice_status -ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) { struct ice_sched_node *q_node; /* Following also checks the presence of node in tree */ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); if (!q_node) - return ICE_ERR_PARAM; + return -EINVAL; return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); } diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 6bddcbecaf5e..4f91577fed56 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -65,12 +65,12 @@ struct ice_sched_agg_info { }; /* FW AQ command calls */ -enum ice_status +int ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd); -enum ice_status ice_sched_init_port(struct ice_port_info *pi); -enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); +int ice_sched_init_port(struct ice_port_info *pi); +int ice_sched_query_res_alloc(struct ice_hw *hw); void ice_sched_get_psm_clk_freq(struct ice_hw *hw); void ice_sched_clear_port(struct ice_port_info *pi); @@ -79,7 +79,7 @@ void ice_sched_clear_agg(struct ice_hw *hw); struct ice_sched_node * ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); -enum ice_status +int ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info); void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node); @@ -87,35 +87,38 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc); struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner); -enum ice_status +int ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); -enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); -enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); +int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle); /* Tx scheduler rate limiter functions */ -enum ice_status +int ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap); -enum ice_status +int ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap); -enum ice_status +int ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type); -enum ice_status +int ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw); -enum ice_status +int ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type); -enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); +int +ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, + enum ice_agg_type agg_type, u8 tc, + enum ice_rl_type rl_type, u32 bw); +int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes); void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw); void ice_sched_replay_agg(struct ice_hw *hw); -enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); -enum ice_status 
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); +int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle); +int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index aa11d07793d4..52c6bac41bf7 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -18,7 +18,7 @@ * queue and asynchronously sending message via * ice_sq_send_cmd() function */ -enum ice_status +int ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd) { @@ -228,7 +228,7 @@ ice_mbx_traverse(struct ice_hw *hw, * sent per VF and marks the VF as malicious if it exceeds * the permissible number of messages to send. */ -static enum ice_status +static int ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, enum ice_mbx_snapshot_state *new_state, bool *is_malvf) @@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id, struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; if (vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* increment the message count in the VF array */ snap->mbx_vf.vf_cntr[vf_id]++; @@ -297,7 +297,7 @@ static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap) * Detect: If pending message count exceeds watermark traverse * the static snapshot and look for a malicious VF. */ -enum ice_status +int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, u16 vf_id, bool *is_malvf) @@ -306,10 +306,10 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_snap_buffer_data *snap_buf; struct ice_ctl_q_info *cq = &hw->mailboxq; enum ice_mbx_snapshot_state new_state; - enum ice_status status = 0; + int status = 0; if (!is_malvf || !mbx_data) - return ICE_ERR_BAD_PTR; + return -EINVAL; /* When entering the mailbox state machine assume that the VF * is not malicious until detected. @@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, * interrupt is not less than the defined AVF message threshold. */ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; /* The watermark value should not be lesser than the threshold limit * set for the number of asynchronous messages a VF can send to mailbox @@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, */ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD || mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx) - return ICE_ERR_PARAM; + return -EINVAL; new_state = ICE_MAL_VF_DETECT_STATE_INVALID; snap_buf = &snap->mbx_buf; @@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, default: new_state = ICE_MAL_VF_DETECT_STATE_INVALID; - status = ICE_ERR_CFG; + status = -EIO; } snap_buf->state = new_state; @@ -405,20 +405,20 @@ ice_mbx_vf_state_handler(struct ice_hw *hw, * the input vf_id against the bitmap to verify if the VF has been * detected in any previous mailbox iterations. 
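 * With the errno-based return, a caller-side sketch might read as
 * follows (hypothetical and illustrative only, not taken from the
 * patch):
 *
 *	err = ice_mbx_report_malvf(hw, all_malvfs, bitmap_len, vf_id,
 *				   &report_malvf);
 *	if (err)
 *		return err;	(-EINVAL on bad arguments, -EIO on range)
 *	if (report_malvf)
 *		dev_warn(ice_hw_to_dev(hw), "VF %u is malicious\n", vf_id);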
*/ -enum ice_status +int ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id, bool *report_malvf) { if (!all_malvfs || !report_malvf) - return ICE_ERR_PARAM; + return -EINVAL; *report_malvf = false; if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; if (vf_id >= bitmap_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* If the vf_id is found in the bitmap set bit and boolean to true */ if (!test_and_set_bit(vf_id, all_malvfs)) @@ -441,19 +441,19 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, * that the new VF loaded is not considered malicious before going * through the overflow detection algorithm. */ -enum ice_status +int ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id) { if (!snap || !all_malvfs) - return ICE_ERR_PARAM; + return -EINVAL; if (bitmap_len < snap->mbx_vf.vfcntr_len) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; /* Ensure VF ID value is not larger than bitmap or VF counter length */ if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */ clear_bit(vf_id, all_malvfs); @@ -482,7 +482,7 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, * called to ensure that the vf_count can be compared against the number * of VFs supported as defined in the functional capabilities of the device. */ -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) { struct ice_mbx_snapshot *snap = &hw->mbx_snapshot; @@ -491,13 +491,13 @@ enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count) * the functional capabilities of the PF. */ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs) - return ICE_ERR_INVAL_SIZE; + return -EINVAL; snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count, sizeof(*snap->mbx_vf.vf_cntr), GFP_KERNEL); if (!snap->mbx_vf.vf_cntr) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Setting the VF counter length to the number of allocated * VFs for given PF's functional capabilities. 
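Taken together, the ice_sriov.c hunks above apply one mechanical substitution, and the same substitutions recur throughout the series. Distilled from these hunks and the removed ice_status.h below, the mapping reads roughly as follows (a reader's summary, not code carried by the patch; each conversion is open-coded at its call site):

/*
 * ICE_ERR_PARAM, ICE_ERR_BAD_PTR, ICE_ERR_INVAL_SIZE -> -EINVAL
 * ICE_ERR_NO_MEMORY                                   -> -ENOMEM
 * ICE_ERR_DOES_NOT_EXIST                              -> -ENOENT
 * ICE_ERR_ALREADY_EXISTS                              -> -EEXIST
 * ICE_ERR_NOT_IMPL                                    -> -EOPNOTSUPP
 * ICE_ERR_MAX_LIMIT                                   -> -ENOSPC
 * ICE_ERR_CFG, ICE_ERR_OUT_OF_RANGE                   -> -EIO
 * (success stays 0 under both schemes)
 */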
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 161dc55d9e9c..68686a3fd7e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -14,24 +14,24 @@ #define ICE_ASYNC_VF_MSG_THRESHOLD 63 #ifdef CONFIG_PCI_IOV -enum ice_status +int ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd); u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); -enum ice_status +int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, u16 vf_id, bool *is_mal_vf); -enum ice_status +int ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id); -enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); +int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count); void ice_mbx_deinit_snapshot(struct ice_hw *hw); -enum ice_status +int ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs, u16 bitmap_len, u16 vf_id, bool *report_malvf); #else /* CONFIG_PCI_IOV */ -static inline enum ice_status +static inline int ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, u16 __always_unused vfid, u32 __always_unused v_opcode, u32 __always_unused v_retval, u8 __always_unused *msg, diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h deleted file mode 100644 index dbf66057371d..000000000000 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, Intel Corporation. */ - -#ifndef _ICE_STATUS_H_ -#define _ICE_STATUS_H_ - -/* Error Codes */ -enum ice_status { - ICE_SUCCESS = 0, - - /* Generic codes : Range -1..-49 */ - ICE_ERR_PARAM = -1, - ICE_ERR_NOT_IMPL = -2, - ICE_ERR_NOT_READY = -3, - ICE_ERR_NOT_SUPPORTED = -4, - ICE_ERR_BAD_PTR = -5, - ICE_ERR_INVAL_SIZE = -6, - ICE_ERR_DEVICE_NOT_SUPPORTED = -8, - ICE_ERR_RESET_FAILED = -9, - ICE_ERR_FW_API_VER = -10, - ICE_ERR_NO_MEMORY = -11, - ICE_ERR_CFG = -12, - ICE_ERR_OUT_OF_RANGE = -13, - ICE_ERR_ALREADY_EXISTS = -14, - ICE_ERR_DOES_NOT_EXIST = -15, - ICE_ERR_IN_USE = -16, - ICE_ERR_MAX_LIMIT = -17, - ICE_ERR_RESET_ONGOING = -18, - ICE_ERR_HW_TABLE = -19, - ICE_ERR_FW_DDP_MISMATCH = -20, - - ICE_ERR_NVM = -50, - ICE_ERR_NVM_CHECKSUM = -51, - ICE_ERR_BUF_TOO_SHORT = -52, - ICE_ERR_NVM_BLANK_MODE = -53, - ICE_ERR_AQ_ERROR = -100, - ICE_ERR_AQ_TIMEOUT = -101, - ICE_ERR_AQ_FULL = -102, - ICE_ERR_AQ_NO_WORK = -103, - ICE_ERR_AQ_EMPTY = -104, - ICE_ERR_AQ_FW_CRITICAL = -105, -}; - -#endif /* _ICE_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 793f4a9fc2cd..11ae0bee3590 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -528,7 +528,7 @@ static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES], * Allocate memory for the entire recipe table and initialize the structures/ * entries corresponding to basic recipes. 
*/ -enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) +int ice_init_def_sw_recp(struct ice_hw *hw) { struct ice_sw_recipe *recps; u8 i; @@ -536,7 +536,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, sizeof(*recps), GFP_KERNEL); if (!recps) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { recps[i].root_rid = i; @@ -576,14 +576,14 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) * in response buffer. The caller of this function to use *num_elems while * parsing the response buffer. */ -static enum ice_status +static int ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size, u16 *req_desc, u16 *num_elems, struct ice_sq_cd *cd) { struct ice_aqc_get_sw_cfg *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); cmd = &desc.params.get_sw_conf; @@ -606,14 +606,14 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, * * Add a VSI context to the hardware (0x0210) */ -static enum ice_status +static int ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; res = &desc.params.add_update_free_vsi_res; @@ -650,14 +650,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware (0x0213) */ -static enum ice_status +static int ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -685,14 +685,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware (0x0211) */ -static enum ice_status +static int ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; @@ -832,15 +832,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw) * If this function gets called after reset for existing VSIs then update * with the new HW VSI number in the corresponding VSI handle list entry. 
*/ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_vsi_ctx *tmp_vsi_ctx; - enum ice_status status; + int status; if (vsi_handle >= ICE_MAX_VSI) - return ICE_ERR_PARAM; + return -EINVAL; status = ice_aq_add_vsi(hw, vsi_ctx, cd); if (status) return status; @@ -851,7 +851,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, sizeof(*tmp_vsi_ctx), GFP_KERNEL); if (!tmp_vsi_ctx) { ice_aq_free_vsi(hw, vsi_ctx, false, cd); - return ICE_ERR_NO_MEMORY; + return -ENOMEM; } *tmp_vsi_ctx = *vsi_ctx; ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); @@ -873,14 +873,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Free VSI context info from hardware as well as from VSI handle list */ -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { - enum ice_status status; + int status; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); if (!status) @@ -897,12 +897,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, * * Update VSI context in the hardware */ -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); return ice_aq_update_vsi(hw, vsi_ctx, cd); } @@ -927,7 +927,7 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) else ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN; - return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL)); + return ice_update_vsi(hw, vsi_handle, ctx, NULL); } /** @@ -939,20 +939,20 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable) * * allocates or free a VSI list resource */ -static enum ice_status +static int ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { struct ice_aqc_alloc_free_res_elem *sw_buf; struct ice_aqc_res_elem *vsi_ele; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); if (lkup_type == ICE_SW_LKUP_MAC || @@ -966,7 +966,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { - status = ICE_ERR_PARAM; + status = -EINVAL; goto ice_aq_alloc_free_vsi_list_exit; } @@ -998,17 +998,17 @@ ice_aq_alloc_free_vsi_list_exit: * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -enum ice_status +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aq_desc desc; - enum ice_status status; + int status; if (opc != ice_aqc_opc_add_sw_rules && opc != ice_aqc_opc_update_sw_rules && opc != ice_aqc_opc_remove_sw_rules) - return ICE_ERR_PARAM; + return -EINVAL; ice_fill_dflt_direct_cmd_desc(&desc, opc); @@ -1018,7 +1018,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, 
cd); if (opc != ice_aqc_opc_add_sw_rules && hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; return status; } @@ -1032,7 +1032,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, * * Add(0x0290) */ -static enum ice_status +static int ice_aq_add_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 num_recipes, struct ice_sq_cd *cd) @@ -1069,18 +1069,18 @@ ice_aq_add_recipe(struct ice_hw *hw, * The caller must supply enough space in s_recipe_list to hold all possible * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES. */ -static enum ice_status +static int ice_aq_get_recipe(struct ice_hw *hw, struct ice_aqc_recipe_data_elem *s_recipe_list, u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) { struct ice_aqc_add_get_recipe *cmd; struct ice_aq_desc desc; - enum ice_status status; u16 buf_size; + int status; if (*num_recipes != ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; cmd = &desc.params.add_get_recipe; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); @@ -1104,7 +1104,7 @@ ice_aq_get_recipe(struct ice_hw *hw, * @cd: pointer to command details structure or NULL * Recipe to profile association (0x0291) */ -static enum ice_status +static int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { @@ -1130,13 +1130,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @cd: pointer to command details structure or NULL * Associate profile ID with given recipe (0x0293) */ -static enum ice_status +static int ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, struct ice_sq_cd *cd) { struct ice_aqc_recipe_to_profile *cmd; struct ice_aq_desc desc; - enum ice_status status; + int status; cmd = &desc.params.recipe_to_profile; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile); @@ -1154,16 +1154,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call */ -static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid) +static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { struct ice_aqc_alloc_free_res_elem *sw_buf; - enum ice_status status; u16 buf_len; + int status; buf_len = struct_size(sw_buf, elem, 1); sw_buf = kzalloc(buf_len, GFP_KERNEL); if (!sw_buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; sw_buf->num_elems = cpu_to_le16(1); sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << @@ -1230,7 +1230,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, * bookkeeping so that we have a current list of all the recipes that are * programmed in the firmware. 
*/ -static enum ice_status +static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, bool *refresh_required) { @@ -1238,16 +1238,16 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, struct ice_aqc_recipe_data_elem *tmp; u16 num_recps = ICE_MAX_NUM_RECIPES; struct ice_prot_lkup_ext *lkup_exts; - enum ice_status status; u8 fv_word_idx = 0; u16 sub_recps; + int status; bitmap_zero(result_bm, ICE_MAX_FV_WORDS); /* we need a buffer big enough to accommodate all the recipes */ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp[0].recipe_indx = rid; status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); @@ -1284,7 +1284,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), GFP_KERNEL); if (!rg_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } @@ -1364,7 +1364,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), GFP_KERNEL); if (!recps[rid].root_buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } @@ -1407,19 +1407,19 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, /* ice_get_initial_sw_cfg - Get initial port and default VSI data * @hw: pointer to the hardware structure */ -enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) +int ice_get_initial_sw_cfg(struct ice_hw *hw) { struct ice_aqc_get_sw_cfg_resp_elem *rbuf; - enum ice_status status; u16 req_desc = 0; u16 num_elems; + int status; u16 i; rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL); if (!rbuf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Multiple calls to ice_aq_get_sw_cfg may be required * to get all the switch configuration information. The need @@ -1670,7 +1670,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, * Create a large action to hold software marker and update the switch rule * entry pointed by m_ent with newly created large action */ -static enum ice_status +static int ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 sw_marker, u16 l_id) { @@ -1681,14 +1681,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, * 3. 
GENERIC VALUE action to hold the software marker ID */ const u16 num_lg_acts = 3; - enum ice_status status; u16 lg_act_size; u16 rules_size; + int status; u32 act; u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: @@ -1699,7 +1699,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL); if (!lg_act) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); @@ -1808,19 +1808,19 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * Call AQ command to add a new switch rule or update existing switch rule * using the given VSI list ID */ -static enum ice_status +static int ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 s_rule_size; u16 rule_type; + int status; int i; if (!num_vsi) - return ICE_ERR_PARAM; + return -EINVAL; if (lkup_type == ICE_SW_LKUP_MAC || lkup_type == ICE_SW_LKUP_MAC_VLAN || @@ -1834,15 +1834,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; else - return ICE_ERR_PARAM; + return -EINVAL; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < num_vsi; i++) { if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto exit; } /* AQ call requires hw_vsi_id(s) */ @@ -1869,11 +1869,11 @@ exit: * @vsi_list_id: stores the ID of the VSI list to be created * @lkup_type: switch rule filter's lookup type */ -static enum ice_status +static int ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) { - enum ice_status status; + int status; status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, ice_aqc_opc_alloc_res); @@ -1895,7 +1895,7 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * to the corresponding filter management list to track this switch rule * and VSI mapping */ -static enum ice_status +static int ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { @@ -1903,16 +1903,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_aqc_sw_rules_elem *s_rule; enum ice_sw_lkup_type l_type; struct ice_sw_recipe *recp; - enum ice_status status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_create_pkt_fwd_rule_exit; } @@ -1959,16 +1959,16 @@ ice_create_pkt_fwd_rule_exit: * Call AQ command to update a previously created switch rule with a * VSI list ID */ -static enum ice_status +static int ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status 
status; + int status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); @@ -1988,13 +1988,13 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) * * Updates unicast switch filter rules based on VEB/VEPA mode */ -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ + int status = 0; rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; @@ -2044,24 +2044,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +static int ice_add_update_vsi_list(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_entry, struct ice_fltr_info *cur_fltr, struct ice_fltr_info *new_fltr) { - enum ice_status status = 0; u16 vsi_list_id = 0; + int status = 0; if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->fltr_act == ICE_FWD_TO_Q || new_fltr->fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->fltr_act == ICE_FWD_TO_VSI || cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -2073,7 +2073,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; vsi_handle_arr[1] = new_fltr->vsi_handle; @@ -2101,7 +2101,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, vsi_list_id); if (!m_entry->vsi_list_info) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list @@ -2116,7 +2116,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, enum ice_adminq_opc opcode; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) @@ -2209,7 +2209,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, * * Adds or updates the rule lists for a given recipe */ -static enum ice_status +static int ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_list_entry *f_entry) { @@ -2217,10 +2217,10 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2255,18 +2255,18 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, * The VSI list should be emptied before this function is called to remove the * VSI list. 
*/ -static enum ice_status +static int ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; u16 s_rule_size; + int status; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); @@ -2288,21 +2288,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, * @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_fltr_mgmt_list_entry *fm_list) { enum ice_sw_lkup_type lkup_type; - enum ice_status status = 0; u16 vsi_list_id; + int status = 0; if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = fm_list->fltr_info.lkup_type; vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; @@ -2324,7 +2324,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -2375,19 +2375,19 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * @recp_id: recipe ID for which the rule needs to removed * @f_entry: rule entry containing filter information */ -static enum ice_status +static int ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, struct ice_fltr_list_entry *f_entry) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *list_elem; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; bool remove_rule = false; u16 vsi_handle; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2395,14 +2395,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, mutex_lock(rule_lock); list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); if (!list_elem) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { remove_rule = true; } else if (!list_elem->vsi_list_info) { - status = ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } else if (list_elem->vsi_list_info->ref_cnt > 1) { /* a ref_cnt > 1 indicates that the vsi_list is being @@ -2435,7 +2435,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto exit; } @@ -2590,7 +2590,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle) * check for duplicates in this case, removing duplicates from a given * list should be taken care of in the caller of this function. 
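 * A hypothetical usage sketch under the new convention (names are
 * illustrative; callers build m_list from ice_fltr_list_entry items as
 * elsewhere in this file):
 *
 *	LIST_HEAD(m_list);
 *	... populate m_list ...
 *	err = ice_add_mac(hw, &m_list);
 *	if (err)
 *		return err;	(-EEXIST if a unicast address already exists)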
*/ -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; @@ -2598,12 +2598,12 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) u16 total_elem_left, s_rule_size; struct ice_switch_info *sw; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; u16 num_unicast = 0; + int status = 0; u8 elem_sent; if (!m_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; s_rule = NULL; sw = hw->switch_info; @@ -2616,23 +2616,23 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) m_list_itr->fltr_info.flag = ICE_FLTR_TX; vsi_handle = m_list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; m_list_itr->fltr_info.src = hw_vsi_id; if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || is_zero_ether_addr(add)) - return ICE_ERR_PARAM; + return -EINVAL; if (is_unicast_ether_addr(add) && !hw->ucast_shared) { /* Don't overwrite the unicast address */ mutex_lock(rule_lock); if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, &m_list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; } mutex_unlock(rule_lock); num_unicast++; @@ -2660,7 +2660,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); if (!s_rule) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } @@ -2710,7 +2710,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list) fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), GFP_KERNEL); if (!fm_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto ice_add_mac_exit; } fm_entry->fltr_info = *f_info; @@ -2737,7 +2737,7 @@ ice_add_mac_exit: * @hw: pointer to the hardware structure * @f_entry: filter entry containing one VLAN information */ -static enum ice_status +static int ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { struct ice_switch_info *sw = hw->switch_info; @@ -2746,10 +2746,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) enum ice_sw_lkup_type lkup_type; u16 vsi_list_id = 0, vsi_handle; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); @@ -2757,10 +2757,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) /* VLAN ID should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) - return ICE_ERR_PARAM; + return -EINVAL; if (new_fltr->src_id != ICE_SRC_ID_VSI) - return ICE_ERR_PARAM; + return -EINVAL; new_fltr->src = new_fltr->fwd_id.hw_vsi_id; lkup_type = new_fltr->lkup_type; @@ -2799,7 +2799,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); if (!v_list_itr) { - status = 
ICE_ERR_DOES_NOT_EXIST; + status = -ENOENT; goto exit; } /* reuse VSI list for new rule and increment ref_cnt */ @@ -2835,7 +2835,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) if (v_list_itr->vsi_count > 1 && v_list_itr->vsi_list_info->ref_cnt > 1) { ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); - status = ICE_ERR_CFG; + status = -EIO; goto exit; } @@ -2845,7 +2845,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) /* A rule already exists with the new VSI being added */ if (cur_handle == vsi_handle) { - status = ICE_ERR_ALREADY_EXISTS; + status = -EEXIST; goto exit; } @@ -2890,16 +2890,16 @@ exit: * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information */ -enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) +int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr; if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(v_list_itr, v_list, list_entry) { if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->fltr_info.flag = ICE_FLTR_TX; v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); if (v_list_itr->status) @@ -2917,13 +2917,12 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) * the filter list with the necessary fields (including flags to * indicate Tx or Rx rules). */ -enum ice_status -ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) { struct ice_fltr_list_entry *em_list_itr; if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(em_list_itr, em_list, list_entry) { enum ice_sw_lkup_type l_type = @@ -2931,7 +2930,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; em_list_itr->status = ice_add_rule_internal(hw, l_type, em_list_itr); @@ -2946,13 +2945,12 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) * @hw: pointer to the hardware structure * @em_list: list of ethertype or ethertype MAC entries */ -enum ice_status -ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) { struct ice_fltr_list_entry *em_list_itr, *tmp; if (!em_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) { enum ice_sw_lkup_type l_type = @@ -2960,7 +2958,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list) if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) - return ICE_ERR_PARAM; + return -EINVAL; em_list_itr->status = ice_remove_rule_internal(hw, l_type, em_list_itr); @@ -3020,18 +3018,17 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head) * add filter rule to set/unset given VSI as default VSI for the switch * (represented by swid) */ -enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) +int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) { struct ice_aqc_sw_rules_elem *s_rule; struct ice_fltr_info f_info; enum ice_adminq_opc opcode; - enum ice_status status; u16 s_rule_size; u16 hw_vsi_id; + int status; if 
(!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : @@ -3039,7 +3036,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; memset(&f_info, 0, sizeof(f_info)); @@ -3137,18 +3134,18 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, * This function removes either a MAC filter rule or a specific VSI from a * VSI list for a multicast MAC address. * - * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by - * ice_add_mac. Caller should be aware that this call will only work if all - * the entries passed into m_list were added previously. It will not attempt to - * do a partial remove of entries that were found. + * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should + * be aware that this call will only work if all the entries passed into m_list + * were added previously. It will not attempt to do a partial remove of entries + * that were found. */ -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_fltr_list_entry *list_itr, *tmp; struct mutex *rule_lock; /* Lock to protect filter rule list */ if (!m_list) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { @@ -3157,11 +3154,11 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; + return -EINVAL; vsi_handle = list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_itr->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); @@ -3174,7 +3171,7 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC, &list_itr->fltr_info)) { mutex_unlock(rule_lock); - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; } mutex_unlock(rule_lock); } @@ -3192,19 +3189,18 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information */ -enum ice_status -ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) +int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; if (!v_list || !hw) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_VLAN) - return ICE_ERR_PARAM; + return -EINVAL; v_list_itr->status = ice_remove_rule_internal(hw, ICE_SW_LKUP_VLAN, v_list_itr); @@ -3242,7 +3238,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) * fltr_info.fwd_id fields. These are set such that later logic can * extract which VSI to remove the fltr from, and pass on that information. 
*/ -static enum ice_status +static int ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *vsi_list_head, struct ice_fltr_info *fi) @@ -3254,7 +3250,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, */ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; tmp->fltr_info = *fi; @@ -3285,17 +3281,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, * Note that this means all entries in vsi_list_head must be explicitly * deallocated by the caller when done with list. */ -static enum ice_status +static int ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct list_head *lkup_list_head, struct list_head *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; - enum ice_status status = 0; + int status = 0; /* check to make sure VSI ID is valid and within boundary */ if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) @@ -3349,9 +3345,8 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) * @recp_id: recipe ID for which the rule needs to removed * @v_list: list of promisc entries */ -static enum ice_status -ice_remove_promisc(struct ice_hw *hw, u8 recp_id, - struct list_head *v_list) +static int +ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; @@ -3371,7 +3366,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id, * @promisc_mask: mask of promiscuous config bits to clear * @vid: VLAN ID to clear VLAN promiscuous */ -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { @@ -3381,11 +3376,11 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct ice_fltr_mgmt_list_entry *itr; struct list_head *rule_head; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status = 0; + int status = 0; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) recipe_id = ICE_SW_LKUP_PROMISC_VLAN; @@ -3444,20 +3439,20 @@ free_fltr_list: * @promisc_mask: mask of promiscuous config bits * @vid: VLAN ID to set VLAN promiscuous */ -enum ice_status +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; struct ice_fltr_list_entry f_list_entry; struct ice_fltr_info new_fltr; - enum ice_status status = 0; bool is_tx_fltr; + int status = 0; u16 hw_vsi_id; int pkt_type; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); memset(&new_fltr, 0, sizeof(new_fltr)); @@ -3558,7 +3553,7 @@ set_promisc_exit: * * Configure VSI with all associated VLANs to given promiscuous mode(s) */ -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc) { @@ -3567,8 +3562,8 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, struct list_head vsi_list_head; struct list_head *vlan_head; struct mutex *vlan_lock; /* Lock to protect filter rule list */ - enum ice_status status; u16 vlan_id; + int status; INIT_LIST_HEAD(&vsi_list_head); vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; @@ -3616,7 +3611,7 
@@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, struct list_head *rule_head; struct ice_fltr_list_entry *tmp; struct mutex *rule_lock; /* Lock to protect filter rule list */ - enum ice_status status; + int status; INIT_LIST_HEAD(&remove_list_head); rule_lock = &sw->recp_list[lkup].filt_rule_lock; @@ -3681,19 +3676,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) * @num_items: number of entries requested for FD resource type * @counter_id: counter index returned by AQ call */ -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Allocate resource */ buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -3719,19 +3714,19 @@ exit: * @num_items: number of entries to be freed for FD resource type * @counter_id: counter ID resource which needs to be freed */ -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { struct ice_aqc_alloc_free_res_elem *buf; - enum ice_status status; u16 buf_len; + int status; /* Free resource */ buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf->num_elems = cpu_to_le16(num_items); buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) & @@ -3796,10 +3791,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * ice_find_recp - find a recipe * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match + * @tun_type: type of recipe tunnel * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ -static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) +static u16 +ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, + enum ice_sw_tunnel_type tun_type) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -3860,8 +3858,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts) } /* If for "i"th recipe the found was never set to false * then it means we found our match + * Also tun type of recipe needs to be checked */ - if (found) + if (found && recp[i].tun_type == tun_type) return i; /* Return the recipe ID */ } } @@ -3937,7 +3936,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, * and start grouping them in 4-word groups. Each group makes up one * recipe. */ -static enum ice_status +static int ice_create_first_fit_recp_def(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, struct list_head *rg_list, @@ -3961,7 +3960,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, sizeof(*entry), GFP_KERNEL); if (!entry) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; list_add(&entry->l_entry, rg_list); grp = &entry->r_group; (*recp_cnt)++; @@ -3987,7 +3986,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw, * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. 
*/ -static enum ice_status +static int ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, struct list_head *rg_list) { @@ -4029,7 +4028,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, * invalid pair */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; } } @@ -4071,10 +4070,8 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS); u16 bit; - bitmap_zero(possible_idx, ICE_MAX_FV_WORDS); bitmap_zero(recipes, ICE_MAX_NUM_RECIPES); bitmap_zero(used_idx, ICE_MAX_FV_WORDS); - bitmap_zero(free_idx, ICE_MAX_FV_WORDS); bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS); @@ -4111,7 +4108,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, * @rm: recipe management list entry * @profiles: bitmap of profiles that will be associated. */ -static enum ice_status +static int ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, unsigned long *profiles) { @@ -4119,11 +4116,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, struct ice_aqc_recipe_data_elem *tmp; struct ice_aqc_recipe_data_elem *buf; struct ice_recp_grp_entry *entry; - enum ice_status status; u16 free_res_idx; u16 recipe_count; u8 chain_idx; u8 recps = 0; + int status; /* When more than one recipe are required, another recipe is needed to * chain them together. Matching a tunnel metadata ID takes up one of @@ -4139,22 +4136,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, if (rm->n_grp_count > 1) { if (rm->n_grp_count > free_res_idx) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; rm->n_grp_count++; } if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); if (!tmp) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), GFP_KERNEL); if (!buf) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_mem; } @@ -4214,7 +4211,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, */ if (chain_idx >= ICE_MAX_FV_WORDS) { ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = ICE_ERR_MAX_LIMIT; + status = -ENOSPC; goto err_unroll; } @@ -4245,7 +4242,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[0].recipe_bitmap, rm->r_bitmap, sizeof(buf[0].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } /* Applicable only for ROOT_RECIPE, set the fwd_priority for @@ -4281,7 +4278,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, sizeof(*last_chain_entry), GFP_KERNEL); if (!last_chain_entry) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_unroll; } last_chain_entry->rid = rid; @@ -4316,7 +4313,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, sizeof(buf[recps].recipe_bitmap)); } else { - status = ICE_ERR_BAD_PTR; + status = -EINVAL; goto err_unroll; } buf[recps].content.act_ctrl_fwd_priority = rm->priority; @@ -4350,7 +4347,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, } if (!idx_found) { - status = ICE_ERR_OUT_OF_RANGE; + status = -EIO; goto err_unroll; } @@ -4403,12 +4400,12 @@ err_mem: * @rm: recipe management list entry * @lkup_exts: lookup elements */ -static enum ice_status +static int ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, struct ice_prot_lkup_ext *lkup_exts) { - enum 
ice_status status; u8 recp_count = 0; + int status; rm->n_grp_count = 0; @@ -4438,21 +4435,21 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, * @bm: bitmap of field vectors to consider * @fv_list: pointer to a list that holds the returned field vectors */ -static enum ice_status +static int ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, unsigned long *bm, struct list_head *fv_list) { - enum ice_status status; u8 *prot_ids; + int status; u16 i; prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL); if (!prot_ids) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; for (i = 0; i < lkups_cnt; i++) if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { - status = ICE_ERR_CFG; + status = -EIO; goto free_mem; } @@ -4489,7 +4486,7 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) * @rinfo: other information regarding the rule e.g. priority and action info * @lkup_exts: lookup word structure */ -static enum ice_status +static int ice_add_special_words(struct ice_adv_rule_info *rinfo, struct ice_prot_lkup_ext *lkup_exts) { @@ -4506,7 +4503,7 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo, lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; lkup_exts->field_mask[word] = mask; } else { - return ICE_ERR_MAX_LIMIT; + return -ENOSPC; } } @@ -4557,7 +4554,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, * @rinfo: other information regarding the rule e.g. priority and action info * @rid: return the recipe ID of the recipe created */ -static enum ice_status +static int ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) { @@ -4568,16 +4565,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_sw_fv_list_entry *fvit; struct ice_recp_grp_entry *r_tmp; struct ice_sw_fv_list_entry *tmp; - enum ice_status status = 0; struct ice_sw_recipe *rm; + int status = 0; u8 i; if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); if (!lkup_exts) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; /* Determine the number of words to be matched and if it exceeds a * recipe's restrictions @@ -4586,20 +4583,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } count = ice_fill_valid_words(&lkups[i], lkup_exts); if (!count) { - status = ICE_ERR_CFG; + status = -EIO; goto err_free_lkup_exts; } } rm = kzalloc(sizeof(*rm), GFP_KERNEL); if (!rm) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_free_lkup_exts; } @@ -4651,11 +4648,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts); + *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); if (*rid < ICE_MAX_NUM_RECIPES) /* Success if found a recipe that match the existing criteria */ goto err_unroll; + rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ status = ice_add_sw_recipe(hw, rm, profiles); if (status) @@ -4844,7 +4842,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt_len: packet length of dummy packet * @offsets: offset info for the dummy packet */ -static enum ice_status +static int ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem 
*lkups, u16 lkups_cnt, struct ice_aqc_sw_rules_elem *s_rule, const u8 *dummy_pkt, u16 pkt_len, @@ -4878,7 +4876,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } /* this should never happen in a correct calling sequence */ if (!found) - return ICE_ERR_PARAM; + return -EINVAL; switch (lkups[i].type) { case ICE_MAC_OFOS: @@ -4915,12 +4913,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, len = sizeof(struct ice_udp_tnl_hdr); break; default: - return ICE_ERR_PARAM; + return -EINVAL; } /* the length should be a word multiple */ if (len % ICE_BYTES_PER_WORD) - return ICE_ERR_CFG; + return -EIO; /* We have the offset to the header start, the length, the * caller's header values and mask. Use this information to @@ -4950,7 +4948,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * @pkt: dummy packet to fill in * @offsets: offset info for the dummy packet */ -static enum ice_status +static int ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) { @@ -4958,11 +4956,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, switch (tun_type) { case ICE_SW_TUN_VXLAN: + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN)) + return -EIO; + break; case ICE_SW_TUN_GENEVE: - if (!ice_get_open_tunnel_port(hw, &open_port)) - return ICE_ERR_CFG; + if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE)) + return -EIO; break; - default: /* Nothing needs to be done for this tunnel type */ return 0; @@ -4982,7 +4982,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, } } - return ICE_ERR_CFG; + return -EIO; } /** @@ -5047,25 +5047,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * Add the new VSI to the previously created VSI list set * using the update switch rule command */ -static enum ice_status +static int ice_adv_add_update_vsi_list(struct ice_hw *hw, struct ice_adv_fltr_mgmt_list_entry *m_entry, struct ice_adv_rule_info *cur_fltr, struct ice_adv_rule_info *new_fltr) { - enum ice_status status; u16 vsi_list_id = 0; + int status; if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) - return ICE_ERR_NOT_IMPL; + return -EOPNOTSUPP; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already @@ -5078,7 +5078,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (cur_fltr->sw_act.fwd_id.hw_vsi_id == new_fltr->sw_act.fwd_id.hw_vsi_id) - return ICE_ERR_ALREADY_EXISTS; + return -EEXIST; vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; @@ -5111,7 +5111,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle = new_fltr->sw_act.vsi_handle; if (!m_entry->vsi_list_info) - return ICE_ERR_CFG; + return -EIO; /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) @@ -5153,7 +5153,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, * rinfo describes other information related to this rule such as 
forwarding * IDs, priority of this rule, etc. */ -enum ice_status +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry) @@ -5164,10 +5164,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_aqc_sw_rules_elem *s_rule = NULL; struct list_head *rule_head; struct ice_switch_info *sw; - enum ice_status status; const u8 *pkt = NULL; u16 word_cnt; u32 act = 0; + int status; u8 q_rgn; /* Initialize profile to result index bitmap */ @@ -5177,7 +5177,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (!lkups_cnt) - return ICE_ERR_PARAM; + return -EINVAL; /* get # of words we need to match */ word_cnt = 0; @@ -5191,13 +5191,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) - return ICE_ERR_PARAM; + return -EINVAL; /* make sure that we can locate a dummy packet */ ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, &pkt_offsets); if (!pkt) { - status = ICE_ERR_PARAM; + status = -EINVAL; goto err_ice_add_adv_rule; } @@ -5205,11 +5205,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP || rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) - return ICE_ERR_CFG; + return -EIO; vsi_handle = rinfo->sw_act.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) - return ICE_ERR_PARAM; + return -EINVAL; if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) rinfo->sw_act.fwd_id.hw_vsi_id = @@ -5243,7 +5243,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; if (!rinfo->flags_info.act_valid) { act |= ICE_SINGLE_ACT_LAN_ENABLE; act |= ICE_SINGLE_ACT_LB_ENABLE; @@ -5277,7 +5277,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ICE_SINGLE_ACT_VALID_BIT; break; default: - status = ICE_ERR_CFG; + status = -EIO; goto err_ice_add_adv_rule; } @@ -5322,14 +5322,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, sizeof(struct ice_adv_fltr_mgmt_list_entry), GFP_KERNEL); if (!adv_fltr) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups, lkups_cnt * sizeof(*lkups), GFP_KERNEL); if (!adv_fltr->lkups) { - status = ICE_ERR_NO_MEMORY; + status = -ENOMEM; goto err_ice_add_adv_rule; } @@ -5372,12 +5372,12 @@ err_ice_add_adv_rule: * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. * It is required to pass valid VSI handle. 
*/ -static enum ice_status +static int ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, struct list_head *list_head) { struct ice_fltr_mgmt_list_entry *itr; - enum ice_status status = 0; + int status = 0; u16 hw_vsi_id; if (list_empty(list_head)) @@ -5426,22 +5426,22 @@ end: * @fm_list: filter management entry for which the VSI list management needs to * be done */ -static enum ice_status +static int ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_adv_fltr_mgmt_list_entry *fm_list) { struct ice_vsi_list_map_info *vsi_list_info; enum ice_sw_lkup_type lkup_type; - enum ice_status status; u16 vsi_list_id; + int status; if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) - return ICE_ERR_PARAM; + return -EINVAL; /* A rule with the VSI being removed does not exist */ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; lkup_type = ICE_SW_LKUP_LAST; vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id; @@ -5461,7 +5461,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) - return ICE_ERR_OUT_OF_RANGE; + return -EIO; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, @@ -5525,27 +5525,27 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, * header. rinfo describes other information related to this rule such as * forwarding IDs, priority of this rule, etc. */ -static enum ice_status +static int ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo) { struct ice_adv_fltr_mgmt_list_entry *list_elem; struct ice_prot_lkup_ext lkup_exts; - enum ice_status status = 0; bool remove_rule = false; struct mutex *rule_lock; /* Lock to protect filter rule list */ u16 i, rid, vsi_handle; + int status = 0; memset(&lkup_exts, 0, sizeof(lkup_exts)); for (i = 0; i < lkups_cnt; i++) { u16 count; if (lkups[i].type >= ICE_PROTOCOL_LAST) - return ICE_ERR_CFG; + return -EIO; count = ice_fill_valid_words(&lkups[i], &lkup_exts); if (!count) - return ICE_ERR_CFG; + return -EIO; } /* Create any special protocol/offset pairs, such as looking at tunnel @@ -5555,10 +5555,10 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) return status; - rid = ice_find_recp(hw, &lkup_exts); + rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) - return ICE_ERR_PARAM; + return -EINVAL; rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); @@ -5590,7 +5590,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) - return ICE_ERR_NO_MEMORY; + return -ENOMEM; s_rule->pdata.lkup_tx_rx.act = 0; s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(list_elem->rule_info.fltr_rule_id); @@ -5598,7 +5598,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, rule_buf_sz, 1, ice_aqc_opc_remove_sw_rules, NULL); - if (!status || status == ICE_ERR_DOES_NOT_EXIST) { + if (!status || status == -ENOENT) { struct ice_switch_info *sw = hw->switch_info; 
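/*
 * For orientation, a minimal sketch (hypothetical wrapper, not part of this
 * patch) of the calling convention after the conversion above: callers now
 * test plain errno values where they previously compared against ICE_ERR_*
 * codes.
 *
 *	static int sketch_remove_rule(struct ice_hw *hw,
 *				      struct ice_rule_query_data *entry)
 *	{
 *		int err = ice_rem_adv_rule_by_id(hw, entry);
 *
 *		if (err == -ENOENT)
 *			return 0;	// rule already gone; callers that keyed
 *					// off ICE_ERR_DOES_NOT_EXIST keep the
 *					// same behavior with -ENOENT
 *		return err;
 *	}
 */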
mutex_lock(rule_lock); @@ -5623,7 +5623,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * the remove_entry parameter. This function will remove rule for a given * vsi_handle with a given rule_id which is passed as parameter in remove_entry */ -enum ice_status +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry) { @@ -5634,7 +5634,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, sw = hw->switch_info; if (!sw->recp_list[remove_entry->rid].recp_created) - return ICE_ERR_PARAM; + return -EINVAL; list_head = &sw->recp_list[remove_entry->rid].filt_rules; list_for_each_entry(list_itr, list_head, list_entry) { if (list_itr->rule_info.fltr_rule_id == @@ -5646,7 +5646,92 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, } } /* either list is empty or unable to find rule */ - return ICE_ERR_DOES_NOT_EXIST; + return -ENOENT; +} + +/** + * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a + * given VSI handle + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle for which we are supposed to remove all the rules. + * + * This function is used to remove all the rules for a given VSI and as soon + * as removing a rule fails, it will return immediately with the error code, + * else it will return success. + */ +int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; + struct ice_vsi_list_map_info *map_info; + struct ice_adv_rule_info rinfo; + struct list_head *list_head; + struct ice_switch_info *sw; + int status; + u8 rid; + + sw = hw->switch_info; + for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { + if (!sw->recp_list[rid].recp_created) + continue; + if (!sw->recp_list[rid].adv_rule) + continue; + + list_head = &sw->recp_list[rid].filt_rules; + list_for_each_entry_safe(list_itr, tmp_entry, list_head, + list_entry) { + rinfo = list_itr->rule_info; + + if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { + map_info = list_itr->vsi_list_info; + if (!map_info) + continue; + + if (!test_bit(vsi_handle, map_info->vsi_map)) + continue; + } else if (rinfo.sw_act.vsi_handle != vsi_handle) { + continue; + } + + rinfo.sw_act.vsi_handle = vsi_handle; + status = ice_rem_adv_rule(hw, list_itr->lkups, + list_itr->lkups_cnt, &rinfo); + if (status) + return status; + } + } + return 0; +} + +/** + * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI + * @hw: pointer to the hardware structure + * @vsi_handle: driver VSI handle + * @list_head: list for which filters need to be replayed + * + * Replay the advanced rule for the given VSI. + */ +static int +ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, + struct list_head *list_head) +{ + struct ice_rule_query_data added_entry = { 0 }; + struct ice_adv_fltr_mgmt_list_entry *adv_fltr; + int status = 0; + + if (list_empty(list_head)) + return status; + list_for_each_entry(adv_fltr, list_head, list_entry) { + struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info; + u16 lk_cnt = adv_fltr->lkups_cnt; + + if (vsi_handle != rinfo->sw_act.vsi_handle) + continue; + status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo, + &added_entry); + if (status) + break; + } + return status; } /** @@ -5656,17 +5741,20 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw, * * Replays filters for requested VSI via vsi_handle. 
*/ -enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) +int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) { struct ice_switch_info *sw = hw->switch_info; - enum ice_status status = 0; + int status; u8 i; - for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct list_head *head; head = &sw->recp_list[i].filt_replay_rules; - status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); + if (!sw->recp_list[i].adv_rule) + status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); + else + status = ice_replay_vsi_adv_rule(hw, vsi_handle, head); if (status) return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index d8a38906f16f..d8334beaaa8a 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -256,7 +256,7 @@ struct ice_vsi_list_map_info { struct ice_fltr_list_entry { struct list_head list_entry; - enum ice_status status; + int status; struct ice_fltr_info fltr_info; }; @@ -302,75 +302,69 @@ enum ice_promisc_flags { }; /* VSI related commands */ -enum ice_status +int ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); -enum ice_status +int ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd); -enum ice_status +int ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); void ice_clear_all_vsi_ctx(struct ice_hw *hw); /* Switch config */ -enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); +int ice_get_initial_sw_cfg(struct ice_hw *hw); -enum ice_status +int ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id); -enum ice_status +int ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id); /* Switch/bridge related commands */ -enum ice_status +int ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_adv_rule_info *rinfo, struct ice_rule_query_data *added_entry); -enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); -enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); -enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); -enum ice_status -ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); -enum ice_status -ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); -int -ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); +int ice_update_sw_rule_bridge_mode(struct ice_hw *hw); +int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); +int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); +int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); +int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle); bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle); +int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list); +int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); -enum ice_status -ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); 
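/*
 * Every prototype in this header follows the same mechanical conversion;
 * as a before/after sketch using one declaration from this hunk:
 *
 *	enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
 *
 * becomes
 *
 *	int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
 *
 * returning 0 on success and a negative errno (-EINVAL, -ENOMEM, -ENOENT,
 * -EEXIST, -EIO, ...) on failure.
 */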
-enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); /* Promisc/defport setup for VSIs */ -enum ice_status -ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); -enum ice_status +int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); +int ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); -enum ice_status +int ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); -enum ice_status -ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); -enum ice_status +int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle); +int ice_rem_adv_rule_by_id(struct ice_hw *hw, struct ice_rule_query_data *remove_entry); -enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); +int ice_init_def_sw_recp(struct ice_hw *hw); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); +int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); -enum ice_status +int ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index e5d23feb6701..e8aab664270a 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner) return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS; } -static enum ice_protocol_type -ice_proto_type_from_l4_port(bool inner, u16 ip_proto) +static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto) { - if (inner) { - switch (ip_proto) { - case IPPROTO_UDP: - return ICE_UDP_ILOS; - } - } else { - switch (ip_proto) { - case IPPROTO_TCP: - return ICE_TCP_IL; - case IPPROTO_UDP: - return ICE_UDP_OF; - } + switch (ip_proto) { + case IPPROTO_TCP: + return ICE_TCP_IL; + case IPPROTO_UDP: + return ICE_UDP_ILOS; } return 0; @@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } - if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) { - list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto); + if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) && + hdr->l3_key.ip_proto == IPPROTO_UDP) { + list[i].type = ICE_UDP_OF; list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port; list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port; i++; @@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, ICE_TC_FLWR_FIELD_SRC_L4_PORT)) { struct ice_tc_l4_hdr *l4_key, *l4_mask; - list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto); + list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto); l4_key = &headers->l4_key; l4_mask = &headers->l4_mask; @@ -405,9 +398,8 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) struct ice_hw *hw = &vsi->back->hw; struct ice_adv_lkup_elem *list; u32 flags = fltr->flags; - enum ice_status status; int lkups_cnt; - int ret = 0; + int ret; int i; if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) { @@ -456,14 +448,13 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) /* specify the cookie as filter_rule_id */ 
rule_info.fltr_rule_id = fltr->cookie; - status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); - if (status == ICE_ERR_ALREADY_EXISTS) { + ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added); + if (ret == -EEXIST) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; - } else if (status) { + } else if (ret) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); - ret = -EIO; goto exit; } @@ -802,7 +793,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, headers->l3_mask.ttl = match.mask->ttl; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) && + fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) { struct flow_match_ports match; flow_rule_match_enc_ports(rule, &match); @@ -1168,7 +1160,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_rem.vsi_handle = fltr->dest_id; err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem); if (err) { - if (err == ICE_ERR_DOES_NOT_EXIST) { + if (err == -ENOENT) { NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist"); return -ENOENT; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index bc3ba19dc88f..3e38695f1c9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -3,8 +3,9 @@ /* The driver transmit and receive code */ -#include <linux/prefetch.h> #include <linux/mm.h> +#include <linux/netdevice.h> +#include <linux/prefetch.h> #include <linux/bpf_trace.h> #include <net/dsfield.h> #include <net/xdp.h> @@ -219,6 +220,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; + /* get the bql data ready */ + if (!ice_ring_is_xdp(tx_ring)) + netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring)); + tx_buf = &tx_ring->tx_buf[i]; tx_desc = ICE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -232,6 +237,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) if (!eop_desc) break; + /* follow the guidelines of other drivers */ + prefetchw(&tx_buf->skb->users); + smp_rmb(); /* prevent any other reads prior to eop_desc */ ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); @@ -304,8 +312,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes); - netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, - total_bytes); + if (ice_ring_is_xdp(tx_ring)) + return !!budget; + + netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes); #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) && @@ -314,11 +324,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget) * sees the new next_to_clean. 
*/ smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, - tx_ring->q_index) && + if (netif_tx_queue_stopped(txring_txq(tx_ring)) && !test_bit(ICE_VSI_DOWN, vsi->state)) { - netif_wake_subqueue(tx_ring->netdev, - tx_ring->q_index); + netif_tx_wake_queue(txring_txq(tx_ring)); ++tx_ring->tx_stats.restart_q; } } @@ -419,7 +427,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) } rx_skip_free: - memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); + if (rx_ring->xsk_pool) + memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); + else + memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); /* Zero out the descriptor ring */ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), @@ -446,8 +457,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); rx_ring->xdp_prog = NULL; - devm_kfree(rx_ring->dev, rx_ring->rx_buf); - rx_ring->rx_buf = NULL; + if (rx_ring->xsk_pool) { + kfree(rx_ring->xdp_buf); + rx_ring->xdp_buf = NULL; + } else { + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + } if (rx_ring->desc) { size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), @@ -475,8 +491,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) /* warn if we are about to overwrite the pointer */ WARN_ON(rx_ring->rx_buf); rx_ring->rx_buf = - devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count, - GFP_KERNEL); + kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); if (!rx_ring->rx_buf) return -ENOMEM; @@ -505,7 +520,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) return 0; err: - devm_kfree(dev, rx_ring->rx_buf); + kfree(rx_ring->rx_buf); rx_ring->rx_buf = NULL; return -ENOMEM; } @@ -561,7 +576,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, goto out_failure; return ICE_XDP_REDIR; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -933,7 +948,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf, */ net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(xdp->data_hard_start, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; @@ -1517,7 +1532,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) */ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) { - netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index); + netif_tx_stop_queue(txring_txq(tx_ring)); /* Memory barrier before checking head and tail */ smp_mb(); @@ -1525,8 +1540,8 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size) if (likely(ICE_DESC_UNUSED(tx_ring) < size)) return -EBUSY; - /* A reprieve! - use start_subqueue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->q_index); + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_tx_start_queue(txring_txq(tx_ring)); ++tx_ring->tx_stats.restart_q; return 0; } @@ -1568,6 +1583,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, struct sk_buff *skb; skb_frag_t *frag; dma_addr_t dma; + bool kick; td_tag = off->td_l2tag1; td_cmd = off->td_cmd; @@ -1649,9 +1665,6 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, tx_buf = &tx_ring->tx_buf[i]; } - /* record bytecount for BQL */ - netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); - /* record SW timestamp if HW timestamp is not available */ skb_tx_timestamp(first->skb); @@ -1680,7 +1693,10 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, ice_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount, + netdev_xmit_more()); + if (kick) + /* notify HW of packet */ writel(i, tx_ring->tail); return; @@ -2265,6 +2281,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) return NETDEV_TX_BUSY; } + /* prefetch for bql data which is infrequently used */ + netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring)); + offload.tx_ring = tx_ring; /* record the location of the first descriptor for this packet */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index c56dd1749903..b7b3bd4816f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -24,7 +24,6 @@ #define ICE_MAX_DATA_PER_TXD_ALIGNED \ (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD) -#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */ #define ICE_MAX_TXQ_PER_TXQG 128 /* Attempt to maximize the headroom available for incoming frames. We use a 2K diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 1dd7e84f41f8..0e87b98e0966 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019, Intel Corporation. 
*/ +#include <linux/filter.h> + #include "ice_txrx_lib.h" #include "ice_eswitch.h" #include "ice_lib.h" @@ -187,8 +189,7 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, ice_rx_hash(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev - (rx_ring, rx_desc)); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); ice_rx_csum(rx_ring, skb, rx_desc, ptype); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 9e0c2923c62e..546145dd1f02 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -6,8 +6,8 @@ #define ICE_BYTES_PER_WORD 2 #define ICE_BYTES_PER_DWORD 4 +#define ICE_CHNL_MAX_TC 16 -#include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_osdep.h" #include "ice_controlq.h" @@ -230,8 +230,8 @@ enum ice_fd_hw_seg { ICE_FD_HW_SEG_MAX, }; -/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */ -#define ICE_MAX_FDIR_VSI_PER_FILTER 2 +/* 1 ICE_VSI_PF + 1 ICE_VSI_CTRL + ICE_CHNL_MAX_TC */ +#define ICE_MAX_FDIR_VSI_PER_FILTER (2 + ICE_CHNL_MAX_TC) struct ice_fd_hw_prof { struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX]; @@ -279,6 +279,10 @@ struct ice_hw_common_caps { #define ICE_NVM_PENDING_NETLIST BIT(2) bool nvm_unified_update; #define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) + /* PCIe reset avoidance */ + bool pcie_reset_avoidance; + /* Post update reset restriction */ + bool reset_restrict_support; }; /* IEEE 1588 TIME_SYNC specific info */ @@ -295,9 +299,30 @@ struct ice_hw_common_caps { #define ICE_TS_TMR_IDX_ASSOC_S 24 #define ICE_TS_TMR_IDX_ASSOC_M BIT(24) +/* TIME_REF clock rate specification */ +enum ice_time_ref_freq { + ICE_TIME_REF_FREQ_25_000 = 0, + ICE_TIME_REF_FREQ_122_880 = 1, + ICE_TIME_REF_FREQ_125_000 = 2, + ICE_TIME_REF_FREQ_153_600 = 3, + ICE_TIME_REF_FREQ_156_250 = 4, + ICE_TIME_REF_FREQ_245_760 = 5, + + NUM_ICE_TIME_REF_FREQ +}; + +/* Clock source specification */ +enum ice_clk_src { + ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */ + ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */ + + NUM_ICE_CLK_SRC +}; + struct ice_ts_func_info { /* Function specific info */ - u32 clk_freq; + enum ice_time_ref_freq time_ref; + u8 clk_freq; u8 clk_src; u8 tmr_index_assoc; u8 ena; @@ -873,8 +898,6 @@ struct ice_hw { u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; - enum ice_aq_err pkg_dwnld_status; - /* Driver's package ver - (from the Ice Metadata section) */ struct ice_pkg_ver pkg_ver; u8 pkg_name[ICE_PKG_NAME_SIZE]; @@ -919,6 +942,7 @@ struct ice_hw { struct mutex rss_locks; /* protect RSS configuration */ struct list_head rss_list_head; struct ice_mbx_snapshot mbx_snapshot; + DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX); u16 io_expander_handle; }; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index eee180d8c024..d64df81d4893 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -47,197 +47,6 @@ struct virtchnl_fdir_fltr_conf { u32 flow_id; }; -static enum virtchnl_proto_hdr_type vc_pattern_ether[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - 
VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_TCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_SCTP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_TCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_SCTP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_GTPU_IP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_GTPU_IP, - VIRTCHNL_PROTO_HDR_GTPU_EH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_L2TPV3, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_L2TPV3, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_AH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_AH, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_ESP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV4, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_PFCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = { - VIRTCHNL_PROTO_HDR_ETH, - VIRTCHNL_PROTO_HDR_IPV6, - VIRTCHNL_PROTO_HDR_UDP, - VIRTCHNL_PROTO_HDR_PFCP, - VIRTCHNL_PROTO_HDR_NONE, -}; - -struct virtchnl_fdir_pattern_match_item { - enum virtchnl_proto_hdr_type *list; - u64 input_set; - u64 *meta; -}; - 
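/* For reference, a self-contained sketch (helper name hypothetical) of the
 * matching semantics these allow-list tables implemented before being
 * replaced by ice_vc_validate_pattern(): a rule matches a pattern when the
 * two header sequences agree element by element up to the
 * VIRTCHNL_PROTO_HDR_NONE terminator.
 */
static bool sketch_pattern_matches(const struct virtchnl_proto_hdrs *proto,
				   const enum virtchnl_proto_hdr_type *pattern)
{
	int i = 0;

	/* walk both sequences while they agree and the pattern continues */
	while (i < proto->count &&
	       pattern[i] == proto->proto_hdr[i].type &&
	       pattern[i] != VIRTCHNL_PROTO_HDR_NONE)
		i++;

	/* a full match consumes every rule header and lands on the terminator */
	return i == proto->count && pattern[i] == VIRTCHNL_PROTO_HDR_NONE;
}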
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = { - {vc_pattern_ipv4, 0, NULL}, - {vc_pattern_ipv4_tcp, 0, NULL}, - {vc_pattern_ipv4_udp, 0, NULL}, - {vc_pattern_ipv4_sctp, 0, NULL}, - {vc_pattern_ipv6, 0, NULL}, - {vc_pattern_ipv6_tcp, 0, NULL}, - {vc_pattern_ipv6_udp, 0, NULL}, - {vc_pattern_ipv6_sctp, 0, NULL}, -}; - -static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = { - {vc_pattern_ipv4, 0, NULL}, - {vc_pattern_ipv4_tcp, 0, NULL}, - {vc_pattern_ipv4_udp, 0, NULL}, - {vc_pattern_ipv4_sctp, 0, NULL}, - {vc_pattern_ipv6, 0, NULL}, - {vc_pattern_ipv6_tcp, 0, NULL}, - {vc_pattern_ipv6_udp, 0, NULL}, - {vc_pattern_ipv6_sctp, 0, NULL}, - {vc_pattern_ether, 0, NULL}, - {vc_pattern_ipv4_gtpu, 0, NULL}, - {vc_pattern_ipv4_gtpu_eh, 0, NULL}, - {vc_pattern_ipv4_l2tpv3, 0, NULL}, - {vc_pattern_ipv6_l2tpv3, 0, NULL}, - {vc_pattern_ipv4_esp, 0, NULL}, - {vc_pattern_ipv6_esp, 0, NULL}, - {vc_pattern_ipv4_ah, 0, NULL}, - {vc_pattern_ipv6_ah, 0, NULL}, - {vc_pattern_ipv4_nat_t_esp, 0, NULL}, - {vc_pattern_ipv6_nat_t_esp, 0, NULL}, - {vc_pattern_ipv4_pfcp, 0, NULL}, - {vc_pattern_ipv6_pfcp, 0, NULL}, -}; - struct virtchnl_fdir_inset_map { enum virtchnl_proto_hdr_field field; enum ice_flow_field fld; @@ -751,7 +560,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, struct ice_flow_seg_info *old_seg; struct ice_flow_prof *prof = NULL; struct ice_fd_hw_prof *vf_prof; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -794,29 +602,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow, tun ? ICE_FLTR_PTYPE_MAX : 0); - status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, - tun + 1, &prof); - ret = ice_status_to_errno(status); + ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg, + tun + 1, &prof); if (ret) { dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n", flow, vf->vf_id); goto err_exit; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry1_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + vf_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry1_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n", flow, vf->vf_id); goto err_prof; } - status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, - ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, - seg, &entry2_h); - ret = ice_status_to_errno(status); + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, &entry2_h); if (ret) { dev_dbg(dev, "Could not add flow 0x%x Ctrl VSI entry for VF %d\n", @@ -911,83 +716,6 @@ err_exit: } /** - * ice_vc_fdir_match_pattern - * @fltr: virtual channel add cmd buffer - * @type: virtual channel protocol filter header type - * - * Matching the header type by comparing fltr and type's value. - * - * Return: true on success, and false on error. 
- */ -static bool -ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr, - enum virtchnl_proto_hdr_type *type) -{ - struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; - int i = 0; - - while ((i < proto->count) && - (*type == proto->proto_hdr[i].type) && - (*type != VIRTCHNL_PROTO_HDR_NONE)) { - type++; - i++; - } - - return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE)); -} - -/** - * ice_vc_fdir_get_pattern - get while list pattern - * @vf: pointer to the VF info - * @len: filter list length - * - * Return: pointer to allowed filter list - */ -static const struct virtchnl_fdir_pattern_match_item * -ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len) -{ - const struct virtchnl_fdir_pattern_match_item *item; - struct ice_pf *pf = vf->pf; - struct ice_hw *hw; - - hw = &pf->hw; - if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", - sizeof(hw->active_pkg_name))) { - item = vc_fdir_pattern_comms; - *len = ARRAY_SIZE(vc_fdir_pattern_comms); - } else { - item = vc_fdir_pattern_os; - *len = ARRAY_SIZE(vc_fdir_pattern_os); - } - - return item; -} - -/** - * ice_vc_fdir_search_pattern - * @vf: pointer to the VF info - * @fltr: virtual channel add cmd buffer - * - * Search for matched pattern from supported pattern list - * - * Return: 0 on success, and other on error. - */ -static int -ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr) -{ - const struct virtchnl_fdir_pattern_match_item *pattern; - int len, i; - - pattern = ice_vc_fdir_get_pattern(vf, &len); - - for (i = 0; i < len; i++) - if (ice_vc_fdir_match_pattern(fltr, pattern[i].list)) - return 0; - - return -EINVAL; -} - -/** * ice_vc_fdir_parse_pattern * @vf: pointer to the VF info * @fltr: virtual channel add cmd buffer @@ -1299,11 +1027,11 @@ static int ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, struct virtchnl_fdir_fltr_conf *conf) { + struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; int ret; - ret = ice_vc_fdir_search_pattern(vf, fltr); - if (ret) - return ret; + if (!ice_vc_validate_pattern(vf, proto)) + return -EINVAL; ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); if (ret) @@ -1467,7 +1195,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, struct ice_fdir_fltr *input = &conf->input; struct ice_vsi *vsi, *ctrl_vsi; struct ice_fltr_desc desc; - enum ice_status status; struct device *dev; struct ice_pf *pf; struct ice_hw *hw; @@ -1497,8 +1224,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, return -ENOMEM; ice_fdir_get_prgm_desc(hw, input, &desc, add); - status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - ret = ice_status_to_errno(status); + ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); if (ret) { dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", vf->vf_id, input->flow_type); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 217ff5e9a6f1..39b80124d282 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -9,6 +9,7 @@ #include "ice_flow.h" #include "ice_eswitch.h" #include "ice_virtchnl_allowlist.h" +#include "ice_flex_pipe.h" #define FIELD_SELECTOR(proto_hdr_field) \ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK) @@ -18,18 +19,7 @@ struct ice_vc_hdr_match_type { u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */ }; -static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = { - {VIRTCHNL_PROTO_HDR_NONE, 
ICE_FLOW_SEG_HDR_NONE}, - {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 | - ICE_FLOW_SEG_HDR_IPV_OTHER}, - {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 | - ICE_FLOW_SEG_HDR_IPV_OTHER}, - {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP}, - {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP}, - {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP}, -}; - -static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = { +static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = { {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE}, {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH}, {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN}, @@ -67,83 +57,7 @@ struct ice_vc_hash_field_match_type { }; static const struct -ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = { - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), - ICE_FLOW_HASH_IPV4}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), - ICE_FLOW_HASH_IPV6}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), - BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_TCP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), - ICE_FLOW_HASH_TCP_PORT}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), - 
BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_UDP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), - ICE_FLOW_HASH_UDP_PORT}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), - BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, - {VIRTCHNL_PROTO_HDR_SCTP, - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | - FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), - ICE_FLOW_HASH_SCTP_PORT}, -}; - -static const struct -ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = { +ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC), BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)}, {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), @@ -289,37 +203,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) } /** - * ice_err_to_virt_err - translate errors for VF return code - * @ice_err: error return code - */ -static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) -{ - switch (ice_err) { - case ICE_SUCCESS: - return VIRTCHNL_STATUS_SUCCESS; - case ICE_ERR_BAD_PTR: - case ICE_ERR_INVAL_SIZE: - case ICE_ERR_DEVICE_NOT_SUPPORTED: - case ICE_ERR_PARAM: - case ICE_ERR_CFG: - return VIRTCHNL_STATUS_ERR_PARAM; - case ICE_ERR_NO_MEMORY: - return VIRTCHNL_STATUS_ERR_NO_MEMORY; - case ICE_ERR_NOT_READY: - case ICE_ERR_RESET_FAILED: - case ICE_ERR_FW_API_VER: - case ICE_ERR_AQ_ERROR: - case ICE_ERR_AQ_TIMEOUT: - case ICE_ERR_AQ_FULL: - case ICE_ERR_AQ_NO_WORK: - case ICE_ERR_AQ_EMPTY: - return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; - default: - return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; - } -} - -/** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure * @v_opcode: operation code @@ -770,8 +653,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) struct ice_hw *hw = &vsi->back->hw; struct ice_aqc_vsi_props *info; struct ice_vsi_ctx *ctxt; - enum ice_status status; - int ret = 0; + int ret; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) @@ -794,12 +676,10 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable) info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); - if (status) { - dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - ret = -EIO; + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n", + ret, ice_aq_str(hw->adminq.sq_last_status)); goto out; } @@ -968,8 +848,8 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) { struct device *dev = ice_pf_to_dev(vf->pf); struct ice_vsi *vsi = ice_get_vf_vsi(vf); - enum ice_status status; u8 broadcast[ETH_ALEN]; + int status; if (ice_is_eswitch_mode_switchdev(vf->pf)) return 0; @@ -977,9 +857,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) eth_broadcast_addr(broadcast); status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); if (status) { - dev_err(dev, "failed to add broadcast MAC filter for VF 
%u, error %s\n", - vf->vf_id, ice_stat_str(status)); - return ice_status_to_errno(status); + dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n", + vf->vf_id, status); + return status; } vf->num_mac++; @@ -988,10 +868,10 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI); if (status) { - dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n", + dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n", &vf->hw_lan_addr.addr[0], vf->vf_id, - ice_stat_str(status)); - return ice_status_to_errno(status); + status); + return status; } vf->num_mac++; @@ -1341,45 +1221,50 @@ static void ice_clear_vf_reset_trigger(struct ice_vf *vf) ice_flush(hw); } -/** - * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s) - * @vf: pointer to the VF info - * @vsi: the VSI being configured - * @promisc_m: mask of promiscuous config bits - * @rm_promisc: promisc flag request from the VF to remove or add filter - * - * This function configures VF VSI promiscuous mode, based on the VF requests, - * for Unicast, Multicast and VLAN - */ -static enum ice_status -ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, - bool rm_promisc) +static int +ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) { - struct ice_pf *pf = vf->pf; - enum ice_status status = 0; - struct ice_hw *hw; + struct ice_hw *hw = &vsi->back->hw; + int status; - hw = &pf->hw; - if (vsi->num_vlan) { - status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, - rm_promisc); - } else if (vf->port_vlan_info) { - if (rm_promisc) - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_info); - else - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - vf->port_vlan_info); - } else { - if (rm_promisc) - status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, - 0); - else - status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, - 0); + if (vf->port_vlan_info) + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_info & VLAN_VID_MASK); + else if (vsi->num_vlan > 1) + status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0); + + if (status && status != -EEXIST) { + dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; +} + +static int +ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) +{ + struct ice_hw *hw = &vsi->back->hw; + int status; + + if (vf->port_vlan_info) + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_info & VLAN_VID_MASK); + else if (vsi->num_vlan > 1) + status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m); + else + status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0); + + if (status && status != -ENOENT) { + dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; } - return status; + return 0; } static void ice_vf_clear_counters(struct ice_vf *vf) @@ -1415,8 +1300,8 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - enum ice_status status; struct device *dev; + int status; if (!vsi->agg_node) return; @@ -1617,6 +1502,7 @@ bool 
ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_vc_set_default_allowlist(vf); ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VFs since it should be * setup only when VF creates its first FDIR rule. */ @@ -1742,11 +1628,14 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) else promisc_m = ICE_UCAST_PROMISC_BITS; - if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) + if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m)) dev_err(dev, "disabling promiscuous mode failed\n"); } + ice_eswitch_del_vf_mac_rule(vf); + ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); /* clean VF control VSI when resetting VF since it should be setup * only when VF creates its first FDIR rule. */ @@ -1763,6 +1652,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ice_vf_post_vsi_rebuild(vf); vsi = ice_get_vf_vsi(vf); ice_eswitch_update_repr(vsi); + ice_eswitch_replay_vf_mac_rule(vf); /* if the VF has been reset allow it to come up again */ if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) @@ -1844,7 +1734,6 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; u8 broadcast[ETH_ALEN]; - enum ice_status status; struct ice_vsi *vsi; struct device *dev; int err; @@ -1864,11 +1753,10 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) } eth_broadcast_addr(broadcast); - status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); - if (status) { - dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n", - vf->vf_id, ice_stat_str(status)); - err = ice_status_to_errno(status); + err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI); + if (err) { + dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n", + vf->vf_id, err); goto release_vsi; } @@ -2021,6 +1909,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) if (ret) goto err_unroll_sriov; + /* rearm global interrupts */ + if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) + ice_irq_dynamic_ena(hw, NULL, NULL); + return 0; err_unroll_sriov: @@ -2110,7 +2002,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); struct device *dev = ice_pf_to_dev(pf); - enum ice_status status; int err; err = ice_check_sriov_allowed(pf); @@ -2130,9 +2021,9 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) return -EBUSY; } - status = ice_mbx_init_snapshot(&pf->hw, num_vfs); - if (status) - return ice_status_to_errno(status); + err = ice_mbx_init_snapshot(&pf->hw, num_vfs); + if (err) + return err; err = ice_pci_sriov_ena(pf, num_vfs); if (err) { @@ -2266,9 +2157,9 @@ int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { - enum ice_status aq_ret; struct device *dev; struct ice_pf *pf; + int aq_ret; if (!vf) return -EINVAL; @@ -2300,8 +2191,8 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { - dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n", - vf->vf_id, ice_stat_str(aq_ret), + dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n", + vf->vf_id, aq_ret, ice_aq_str(pf->hw.mailboxq.sq_last_status)); return -EIO; } @@ -2550,6 +2441,100 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len) } /** + * ice_vc_validate_pattern + * @vf: pointer to the VF info + * @proto: virtchnl protocol headers + * + * validate the 
pattern is supported or not. + * + * Return: true on success, false on error. + */ +bool +ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) +{ + bool is_ipv4 = false; + bool is_ipv6 = false; + bool is_udp = false; + u16 ptype = -1; + int i = 0; + + while (i < proto->count && + proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) { + switch (proto->proto_hdr[i].type) { + case VIRTCHNL_PROTO_HDR_ETH: + ptype = ICE_PTYPE_MAC_PAY; + break; + case VIRTCHNL_PROTO_HDR_IPV4: + ptype = ICE_PTYPE_IPV4_PAY; + is_ipv4 = true; + break; + case VIRTCHNL_PROTO_HDR_IPV6: + ptype = ICE_PTYPE_IPV6_PAY; + is_ipv6 = true; + break; + case VIRTCHNL_PROTO_HDR_UDP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_UDP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_UDP_PAY; + is_udp = true; + break; + case VIRTCHNL_PROTO_HDR_TCP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_TCP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_TCP_PAY; + break; + case VIRTCHNL_PROTO_HDR_SCTP: + if (is_ipv4) + ptype = ICE_PTYPE_IPV4_SCTP_PAY; + else if (is_ipv6) + ptype = ICE_PTYPE_IPV6_SCTP_PAY; + break; + case VIRTCHNL_PROTO_HDR_GTPU_IP: + case VIRTCHNL_PROTO_HDR_GTPU_EH: + if (is_ipv4) + ptype = ICE_MAC_IPV4_GTPU; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_GTPU; + goto out; + case VIRTCHNL_PROTO_HDR_L2TPV3: + if (is_ipv4) + ptype = ICE_MAC_IPV4_L2TPV3; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_L2TPV3; + goto out; + case VIRTCHNL_PROTO_HDR_ESP: + if (is_ipv4) + ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP : + ICE_MAC_IPV4_ESP; + else if (is_ipv6) + ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP : + ICE_MAC_IPV6_ESP; + goto out; + case VIRTCHNL_PROTO_HDR_AH: + if (is_ipv4) + ptype = ICE_MAC_IPV4_AH; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_AH; + goto out; + case VIRTCHNL_PROTO_HDR_PFCP: + if (is_ipv4) + ptype = ICE_MAC_IPV4_PFCP_SESSION; + else if (is_ipv6) + ptype = ICE_MAC_IPV6_PFCP_SESSION; + goto out; + default: + break; + } + i++; + } + +out: + return ice_hw_ptype_ena(&vf->pf->hw, ptype); +} + +/** * ice_vc_parse_rss_cfg - parses hash fields and headers from * a specific virtchnl RSS cfg * @hw: pointer to the hardware @@ -2572,18 +2557,10 @@ ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; - if (!strncmp(hw->active_pkg_name, "ICE COMMS Package", - sizeof(hw->active_pkg_name))) { - hf_list = ice_vc_hash_field_list_comms; - hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms); - hdr_list = ice_vc_hdr_list_comms; - hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms); - } else { - hf_list = ice_vc_hash_field_list_os; - hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os); - hdr_list = ice_vc_hdr_list_os; - hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os); - } + hf_list = ice_vc_hash_field_list; + hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); + hdr_list = ice_vc_hdr_list; + hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list); for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { struct virtchnl_proto_hdr *proto_hdr = @@ -2685,10 +2662,15 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) goto error_param; } + if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { struct ice_vsi_ctx *ctx; - enum ice_status status; u8 lut_type, hash_type; + int status; lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; hash_type = add ? 
ICE_AQ_VSI_Q_OPT_RSS_XOR : @@ -2717,9 +2699,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) status = ice_update_vsi(hw, vsi->idx, ctx, NULL); if (status) { - dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n", - ice_stat_str(status), - ice_aq_str(hw->adminq.sq_last_status)); + dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", + status, ice_aq_str(hw->adminq.sq_last_status)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; } else { vsi->info.q_opt_rss = ctx->info.q_opt_rss; @@ -2744,19 +2725,18 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) vsi->vsi_num, v_ret); } } else { - enum ice_status status; + int status; status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, addl_hdrs); - /* We just ignore ICE_ERR_DOES_NOT_EXIST, because - * if two configurations share the same profile remove - * one of them actually removes both, since the - * profile is deleted. + /* We just ignore -ENOENT, because if two configurations + * share the same profile remove one of them actually + * removes both, since the profile is deleted. */ - if (status && status != ICE_ERR_DOES_NOT_EXIST) { + if (status && status != -ENOENT) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n", - vf->vf_id, ice_stat_str(status)); + dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", + vf->vf_id, status); } } } @@ -2914,7 +2894,6 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) struct ice_pf *pf = np->vsi->back; struct ice_vsi_ctx *ctx; struct ice_vsi *vf_vsi; - enum ice_status status; struct device *dev; struct ice_vf *vf; int ret; @@ -2964,12 +2943,10 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)); } - status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); - if (status) { - dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n", - ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, - ice_stat_str(status)); - ret = -EIO; + ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); + if (ret) { + dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %d\n", + ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret); goto out; } @@ -3015,10 +2992,10 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf) static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - enum ice_status mcast_status = 0, ucast_status = 0; bool rm_promisc, alluni = false, allmulti = false; struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)msg; + int mcast_err = 0, ucast_err = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; struct device *dev; @@ -3100,24 +3077,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) ucast_m = ICE_UCAST_PROMISC_BITS; } - ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m, - !alluni); - if (ucast_status) { - dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n", - alluni ? "en" : "dis", vf->vf_id); - v_ret = ice_err_to_virt_err(ucast_status); - } + if (alluni) + ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m); + else + ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); - mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m, - !allmulti); - if (mcast_status) { - dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n", - allmulti ?
"en" : "dis", vf->vf_id); - v_ret = ice_err_to_virt_err(mcast_status); - } + if (allmulti) + mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); + else + mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); + + if (ucast_err || mcast_err) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } - if (!mcast_status) { + if (!mcast_err) { if (allmulti && !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", @@ -3127,7 +3101,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) vf->vf_id); } - if (!ucast_status) { + if (!ucast_err) { if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id); @@ -3807,8 +3781,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; - int ret = 0; + int ret; /* device MAC already added */ if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) @@ -3819,18 +3792,17 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, return -EPERM; } - status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_ALREADY_EXISTS) { + ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); + if (ret == -EEXIST) { dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, vf->vf_id); /* don't return since we might need to update * the primary MAC in ice_vfhw_mac_add() below */ - ret = -EEXIST; - } else if (status) { - dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); - return -EIO; + } else if (ret) { + dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", + mac_addr, vf->vf_id, ret); + return ret; } else { vf->num_mac++; } @@ -3907,20 +3879,20 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, { struct device *dev = ice_pf_to_dev(vf->pf); u8 *mac_addr = vc_ether_addr->addr; - enum ice_status status; + int status; if (!ice_can_vf_change_mac(vf) && ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) return 0; status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); - if (status == ICE_ERR_DOES_NOT_EXIST) { + if (status == -ENOENT) { dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, vf->vf_id); return -ENOENT; } else if (status) { - dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n", - mac_addr, vf->vf_id, ice_stat_str(status)); + dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", + mac_addr, vf->vf_id, status); return -EIO; } @@ -4527,6 +4499,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) for (i = 0; i < al->num_elements; i++) { u8 *mac_addr = al->list[i].addr; + int result; if (!is_unicast_ether_addr(mac_addr) || ether_addr_equal(mac_addr, vf->hw_lan_addr.addr)) @@ -4538,6 +4511,13 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) goto handle_mac_exit; } + result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr); + if (result) { + dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n", + mac_addr, vf->vf_id, result); + goto handle_mac_exit; + } + ice_vfhw_mac_add(vf, &al->list[i]); vf->num_mac++; break; @@ -5283,9 +5263,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, s16 vf_id = le16_to_cpu(event->desc.retval); struct device *dev = ice_pf_to_dev(pf); struct ice_mbx_data mbxdata; - enum ice_status status; bool malvf = false; struct ice_vf *vf; + int status; if 
(ice_validate_vf_id(pf, vf_id)) return false; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 7e28ecbbe7af..752487a1bdd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -203,6 +203,8 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +bool +ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf); int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index ff55cb415b11..2388837d6d6c 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -12,6 +12,11 @@ #include "ice_txrx_lib.h" #include "ice_lib.h" +static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx) +{ + return &rx_ring->xdp_buf[idx]; +} + /** * ice_qp_reset_stats - Resets all stats for rings of given index * @vsi: VSI that contains rings of interest @@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) dma_addr_t dma; rx_desc = ICE_RX_DESC(rx_ring, ntu); - xdp = &rx_ring->xdp_buf[ntu]; + xdp = ice_xdp_buf(rx_ring, ntu); nb_buffs = min_t(u16, count, rx_ring->count - ntu); nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs); @@ -383,20 +388,16 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) while (i--) { dma = xsk_buff_xdp_get_dma(*xdp); rx_desc->read.pkt_addr = cpu_to_le64(dma); + rx_desc->wb.status_error0 = 0; rx_desc++; xdp++; } ntu += nb_buffs; - if (ntu == rx_ring->count) { - rx_desc = ICE_RX_DESC(rx_ring, 0); - xdp = rx_ring->xdp_buf; + if (ntu == rx_ring->count) ntu = 0; - } - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.status_error0 = 0; ice_release_rx_desc(rx_ring, ntu); return count == nb_buffs; @@ -418,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring) /** * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer * @rx_ring: Rx ring - * @xdp_arr: Pointer to the SW ring of xdp_buff pointers + * @xdp: Pointer to XDP buffer * * This function allocates a new skb from a zero-copy Rx buffer. * * Returns the skb on success, NULL on failure. 
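 * Note: this is the copy path; the frame contents are copied into a
 * freshly allocated skb and the zero-copy xdp_buff is then returned to
 * the XSK pool with xsk_buff_free(), so the underlying umem frame can
 * be reused for Rx right away.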
*/ static struct sk_buff * -ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr) +ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) { - struct xdp_buff *xdp = *xdp_arr; + unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start; unsigned int metasize = xdp->data - xdp->data_meta; unsigned int datasize = xdp->data_end - xdp->data; - unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start; struct sk_buff *skb; skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard, @@ -444,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr) skb_metadata_set(skb, metasize); xsk_buff_free(xdp); - *xdp_arr = NULL; return skb; } @@ -482,7 +481,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, goto out_failure; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -506,7 +505,6 @@ out_failure: int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); struct ice_tx_ring *xdp_ring; unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; @@ -521,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; unsigned int size, xdp_res = 0; - struct xdp_buff **xdp; + struct xdp_buff *xdp; struct sk_buff *skb; u16 stat_err_bits; u16 vlan_tag = 0; @@ -539,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) */ dma_rmb(); + xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; - if (!size) - break; + if (!size) { + xdp->data = NULL; + xdp->data_end = NULL; + xdp->data_hard_start = NULL; + xdp->data_meta = NULL; + goto construct_skb; + } - xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean]; - xsk_buff_set_size(*xdp, size); - xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool); + xsk_buff_set_size(xdp, size); + xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool); - xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring); + xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring); if (xdp_res) { if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) xdp_xmit |= xdp_res; else - xsk_buff_free(*xdp); + xsk_buff_free(xdp); - *xdp = NULL; total_rx_bytes += size; total_rx_packets++; - cleaned_count++; ice_bump_ntc(rx_ring); continue; } - +construct_skb: /* XDP_PASS path */ skb = ice_construct_skb_zc(rx_ring, xdp); if (!skb) { @@ -571,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) break; } - cleaned_count++; ice_bump_ntc(rx_ring); if (eth_skb_pad(skb)) { @@ -593,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) ice_receive_skb(rx_ring, skb, vlan_tag); } - if (cleaned_count >= ICE_RX_BUF_WRITE) - failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count); + failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring)); ice_finalize_xdp_rx(xdp_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); @@ -810,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi) */ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { - u16 i; - - for (i = 0; i < rx_ring->count; i++) { - struct xdp_buff **xdp = &rx_ring->xdp_buf[i]; + u16 count_mask = rx_ring->count - 1; + u16 ntc = rx_ring->next_to_clean; + u16 ntu = 
rx_ring->next_to_use; - if (!xdp) - continue; + for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) { + struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc); - *xdp = NULL; + xsk_buff_free(xdp); } } diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 9265901455cd..b9b9d35494d2 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -792,7 +792,6 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) **/ s32 igb_init_nvm_params_i210(struct e1000_hw *hw) { - s32 ret_val = 0; struct e1000_nvm_info *nvm = &hw->nvm; nvm->ops.acquire = igb_acquire_nvm_i210; @@ -813,7 +812,7 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw) nvm->ops.validate = NULL; nvm->ops.update = NULL; } - return ret_val; + return 0; } /** diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index fb1029352c3e..51a2dcaf553d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -864,7 +864,9 @@ static void igb_get_drvinfo(struct net_device *netdev, } static void igb_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -875,7 +877,9 @@ static void igb_get_ringparam(struct net_device *netdev, } static int igb_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); struct igb_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 836be0d3b291..38ba92022cd4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2927,7 +2927,7 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) nq = txring_txq(tx_ring); __netif_tx_lock(nq, cpu); /* Avoid transmit queue timeout since we share it with the slow path */ - nq->trans_start = jiffies; + txq_trans_cond_update(nq); ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); __netif_tx_unlock(nq); @@ -2961,7 +2961,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n, __netif_tx_lock(nq, cpu); /* Avoid transmit queue timeout since we share it with the slow path */ - nq->trans_start = jiffies; + txq_trans_cond_update(nq); for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; @@ -6739,12 +6739,119 @@ void igb_update_stats(struct igb_adapter *adapter) } } -static void igb_tsync_interrupt(struct igb_adapter *adapter) +static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt); + struct e1000_hw *hw = &adapter->hw; + struct timespec64 ts; + u32 tsauxc; + + if (pin < 0 || pin >= IGB_N_PEROUT) + return; + + spin_lock(&adapter->tmreg_lock); + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + s64 ns = timespec64_to_ns(&adapter->perout[pin].period); + u32 systiml, systimh, level_mask, level, rem; + u64 systim, now; + + /* read systim registers in sequence */ + rd32(E1000_SYSTIMR); + systiml = rd32(E1000_SYSTIML); + systimh = rd32(E1000_SYSTIMH); + systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml); 
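+ /* SYSTIMR is read first because the timestamp latches on the lowest
+ * register read on this family; SYSTIML/SYSTIMH then yield a coherent
+ * 40-bit snapshot (only the low 8 bits of SYSTIMH are implemented).
+ * timecounter_cyc2time() converts the raw count so the target time can
+ * be rounded to the next period boundary with div_u64_rem() below.
+ */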
+ now = timecounter_cyc2time(&adapter->tc, systim); + + if (pin < 2) { + level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000; + level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0; + } else { + level_mask = (tsintr_tt == 1) ? 0x80 : 0x40; + level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0; + } + + div_u64_rem(now, ns, &rem); + systim = systim + (ns - rem); + + /* synchronize pin level with rising/falling edges */ + div_u64_rem(now, ns << 1, &rem); + if (rem < ns) { + /* first half of period */ + if (level == 0) { + /* output is already low, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed falling edge\n", + adapter->sdp_config[pin].name); + } + } else { + /* second half of period */ + if (level == 1) { + /* output is already high, skip this period */ + systim += ns; + pr_notice("igb: periodic output on %s missed rising edge\n", + adapter->sdp_config[pin].name); + } + } + + /* for this chip family tv_sec is the upper part of the binary value, + * so not seconds + */ + ts.tv_nsec = (u32)systim; + ts.tv_sec = ((u32)(systim >> 32)) & 0xFF; + } else { + ts = timespec64_add(adapter->perout[pin].start, + adapter->perout[pin].period); + } + + /* u32 conversion of tv_sec is safe until y2106 */ + wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec); + wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[pin].start = ts; + + spin_unlock(&adapter->tmreg_lock); +} + +static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) { + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt); + int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0; + int auxstmph = (tsintr_tt == 1) ? 
E1000_AUXSTMPH1 : E1000_AUXSTMPH0; struct e1000_hw *hw = &adapter->hw; struct ptp_clock_event event; struct timespec64 ts; - u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); + + if (pin < 0 || pin >= IGB_N_EXTTS) + return; + + if (hw->mac.type == e1000_82580 || + hw->mac.type == e1000_i354 || + hw->mac.type == e1000_i350) { + s64 ns = rd32(auxstmpl); + + ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32; + ts = ns_to_timespec64(ns); + } else { + ts.tv_nsec = rd32(auxstmpl); + ts.tv_sec = rd32(auxstmph); + } + + event.type = PTP_CLOCK_EXTTS; + event.index = tsintr_tt; + event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec; + ptp_clock_event(adapter->ptp_clock, &event); +} + +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ack = 0, tsicr = rd32(E1000_TSICR); + struct ptp_clock_event event; if (tsicr & TSINTR_SYS_WRAP) { event.type = PTP_CLOCK_PPS; @@ -6760,51 +6867,22 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) } if (tsicr & TSINTR_TT0) { - spin_lock(&adapter->tmreg_lock); - ts = timespec64_add(adapter->perout[0].start, - adapter->perout[0].period); - /* u32 conversion of tv_sec is safe until y2106 */ - wr32(E1000_TRGTTIML0, ts.tv_nsec); - wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); - tsauxc = rd32(E1000_TSAUXC); - tsauxc |= TSAUXC_EN_TT0; - wr32(E1000_TSAUXC, tsauxc); - adapter->perout[0].start = ts; - spin_unlock(&adapter->tmreg_lock); + igb_perout(adapter, 0); ack |= TSINTR_TT0; } if (tsicr & TSINTR_TT1) { - spin_lock(&adapter->tmreg_lock); - ts = timespec64_add(adapter->perout[1].start, - adapter->perout[1].period); - wr32(E1000_TRGTTIML1, ts.tv_nsec); - wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); - tsauxc = rd32(E1000_TSAUXC); - tsauxc |= TSAUXC_EN_TT1; - wr32(E1000_TSAUXC, tsauxc); - adapter->perout[1].start = ts; - spin_unlock(&adapter->tmreg_lock); + igb_perout(adapter, 1); ack |= TSINTR_TT1; } if (tsicr & TSINTR_AUTT0) { - nsec = rd32(E1000_AUXSTMPL0); - sec = rd32(E1000_AUXSTMPH0); - event.type = PTP_CLOCK_EXTTS; - event.index = 0; - event.timestamp = sec * 1000000000ULL + nsec; - ptp_clock_event(adapter->ptp_clock, &event); + igb_extts(adapter, 0); ack |= TSINTR_AUTT0; } if (tsicr & TSINTR_AUTT1) { - nsec = rd32(E1000_AUXSTMPL1); - sec = rd32(E1000_AUXSTMPH1); - event.type = PTP_CLOCK_EXTTS; - event.index = 1; - event.timestamp = sec * 1000000000ULL + nsec; - ptp_clock_event(adapter->ptp_clock, &event); + igb_extts(adapter, 1); ack |= TSINTR_AUTT1; } @@ -7648,6 +7726,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, struct vf_mac_filter *entry = NULL; int ret = 0; + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && + !vf_data->trusted) { + dev_warn(&pdev->dev, + "VF %d requested MAC filter but is administratively denied\n", + vf); + return -EINVAL; + } + if (!is_valid_ether_addr(addr)) { + dev_warn(&pdev->dev, + "VF %d attempted to set invalid MAC filter\n", + vf); + return -EINVAL; + } + switch (info) { case E1000_VF_MAC_FILTER_CLR: /* remove all unicast MAC filters related to the current VF */ @@ -7661,20 +7753,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf, } break; case E1000_VF_MAC_FILTER_ADD: - if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) && - !vf_data->trusted) { - dev_warn(&pdev->dev, - "VF %d requested MAC filter but is administratively denied\n", - vf); - return -EINVAL; - } - if (!is_valid_ether_addr(addr)) { - dev_warn(&pdev->dev, - "VF %d attempted to set invalid MAC filter\n", - vf); - return -EINVAL; - } - /* try to find empty 
slot in the list */ list_for_each(pos, &adapter->vf_macs.l) { entry = list_entry(pos, struct vf_mac_filter, l); @@ -8026,7 +8104,7 @@ static int igb_poll(struct napi_struct *napi, int budget) if (likely(napi_complete_done(napi, work_done))) igb_ring_irq_enable(q_vector); - return min(work_done, budget - 1); + return work_done; } /** @@ -8367,7 +8445,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(xdp->data_hard_start, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; @@ -8422,7 +8500,7 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, result = IGB_XDP_REDIR; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -9254,7 +9332,7 @@ static int __maybe_unused igb_suspend(struct device *dev) return __igb_shutdown(to_pci_dev(dev), NULL, 0); } -static int __maybe_unused igb_resume(struct device *dev) +static int __maybe_unused __igb_resume(struct device *dev, bool rpm) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -9297,17 +9375,24 @@ static int __maybe_unused igb_resume(struct device *dev) wr32(E1000_WUS, ~0); - rtnl_lock(); + if (!rpm) + rtnl_lock(); if (!err && netif_running(netdev)) err = __igb_open(netdev, true); if (!err) netif_device_attach(netdev); - rtnl_unlock(); + if (!rpm) + rtnl_unlock(); return err; } +static int __maybe_unused igb_resume(struct device *dev) +{ + return __igb_resume(dev, false); +} + static int __maybe_unused igb_runtime_idle(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); @@ -9326,7 +9411,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev) static int __maybe_unused igb_runtime_resume(struct device *dev) { - return igb_resume(dev); + return __igb_resume(dev, true); } static void igb_shutdown(struct pci_dev *pdev) @@ -9442,7 +9527,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation - * resembles the first-half of the igb_resume routine. + * resembles the first-half of the __igb_resume routine. **/ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) { @@ -9482,7 +9567,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the - * second-half of the igb_resume routine. + * second-half of the __igb_resume routine. 
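 * (__igb_resume takes RTNL itself only when called with rpm false; the
 * runtime-PM resume path passes rpm true because its caller may already
 * hold RTNL, where taking it again would deadlock.)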
*/ static void igb_io_resume(struct pci_dev *pdev) { diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0011b15e678c..6580fcddb4be 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -69,6 +69,7 @@ #define IGB_NBITS_82580 40 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); +static void igb_ptp_sdp_init(struct igb_adapter *adapter); /* SYSTIM read access for the 82576 */ static u64 igb_ptp_read_82576(const struct cyclecounter *cc) @@ -507,6 +508,158 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) wr32(E1000_CTRL_EXT, ctrl_ext); } +static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, systiml, + systimh, level_mask, level, rem; + struct e1000_hw *hw = &igb->hw; + struct timespec64 ts, start; + unsigned long flags; + u64 systim, now; + int pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + ns = ns >> 1; + if (on && ns < 8LL) + return -EINVAL; + ts = ns_to_timespec64(ns); + if (rq->perout.index == 1) { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } + if (on) { + int i = rq->perout.index; + + /* read systim registers in sequence */ + rd32(E1000_SYSTIMR); + systiml = rd32(E1000_SYSTIML); + systimh = rd32(E1000_SYSTIMH); + systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml); + now = timecounter_cyc2time(&igb->tc, systim); + + if (pin < 2) { + level_mask = (i == 1) ? 0x80000 : 0x40000; + level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0; + } else { + level_mask = (i == 1) ? 0x80 : 0x40; + level = (rd32(E1000_CTRL_EXT) & level_mask) ? 
1 : 0; + } + + div_u64_rem(now, ns, &rem); + systim = systim + (ns - rem); + + /* synchronize pin level with rising/falling edges */ + div_u64_rem(now, ns << 1, &rem); + if (rem < ns) { + /* first half of period */ + if (level == 0) { + /* output is already low, skip this period */ + systim += ns; + } + } else { + /* second half of period */ + if (level == 1) { + /* output is already high, skip this period */ + systim += ns; + } + } + + start = ns_to_timespec64(systim + (ns - rem)); + igb_pin_perout(igb, i, pin, 0); + igb->perout[i].start.tv_sec = start.tv_sec; + igb->perout[i].start.tv_nsec = start.tv_nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + + wr32(trgttiml, (u32)systim); + wr32(trgttimh, ((u32)(systim >> 32)) & 0xFF); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { @@ -1015,10 +1168,6 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, bool is_l2 = false; u32 regval; - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; @@ -1192,7 +1341,6 @@ void igb_ptp_init(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - int i; switch (hw->mac.type) { case e1000_82576: @@ -1215,16 +1363,21 @@ void igb_ptp_init(struct igb_adapter *adapter) case e1000_82580: case e1000_i354: case e1000_i350: + igb_ptp_sdp_init(adapter); snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 62499999; - adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; adapter->ptp_caps.pps = 0; + adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580; adapter->ptp_caps.settime64 = igb_ptp_settime_82576; - adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->ptp_caps.enable = igb_ptp_feature_enable_82580; + adapter->ptp_caps.verify = igb_ptp_verify_pin; adapter->cc.read = igb_ptp_read_82580; adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); adapter->cc.mult = 1; @@ -1233,13 +1386,7 @@ void igb_ptp_init(struct igb_adapter *adapter) break; case e1000_i210: case e1000_i211: - for (i = 0; i < IGB_N_SDP; i++) { - struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; - - snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); - ppd->index = i; - ppd->func = PTP_PF_NONE; - } + igb_ptp_sdp_init(adapter); snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 62499999; @@ -1285,6 +1432,23 @@ void igb_ptp_init(struct igb_adapter *adapter) } /** + * igb_ptp_sdp_init - utility function which inits the SDP config structs + * @adapter: Board private structure. 
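+ *
+ * Every pin starts out unassigned (PTP_PF_NONE); user space later assigns
+ * a function per pin (PTP_PF_EXTTS or PTP_PF_PEROUT here) through the PTP
+ * core's PTP_PIN_SETFUNC interface, with igb_ptp_verify_pin() vetting each
+ * request.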
+ **/ +void igb_ptp_sdp_init(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < IGB_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } +} + +/** * igb_ptp_suspend - Disable PTP work items and prepare for suspend * @adapter: Board private structure * diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 06e5bd646a0e..9d4322b74163 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -175,7 +175,9 @@ static void igbvf_get_drvinfo(struct net_device *netdev, } static void igbvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -188,7 +190,9 @@ static void igbvf_get_ringparam(struct net_device *netdev, } static int igbvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 74ccd622251a..b78407289741 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1520,7 +1520,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter) /* Allow time for pending master requests to run */ if (mac->ops.reset_hw(hw)) - dev_warn(&adapter->pdev->dev, "PF still resetting\n"); + dev_info(&adapter->pdev->dev, "PF still resetting\n"); mac->ops.init_hw(hw); @@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_hw_init: + netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index c7fe61509d5b..5c66b97c0cfa 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -85,9 +85,6 @@ #define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) -/* Physical Func Reset Done Indication */ -#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000 - /* Loop limit on how long we wait for auto-negotiation to complete */ #define COPPER_LINK_UP_LIMIT 10 #define PHY_AUTO_NEG_LIMIT 45 @@ -584,7 +581,6 @@ #define IGC_GEN_POLL_TIMEOUT 1920 /* PHY Control Register */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ #define MII_CR_POWER_DOWN 0x0800 /* Power down */ #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ @@ -605,9 +601,6 @@ #define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ -/* Bit definitions for valid PHY IDs. 
I = Integrated E = External */ -#define I225_I_PHY_ID 0x67C9DC00 - /* MDI Control */ #define IGC_MDIC_DATA_MASK 0x0000FFFF #define IGC_MDIC_REG_MASK 0x001F0000 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index e0a76ac1bbbc..8cc077b712ad 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -567,8 +567,11 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev, return ret_val; } -static void igc_ethtool_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static void +igc_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -578,8 +581,11 @@ static void igc_ethtool_get_ringparam(struct net_device *netdev, ring->tx_pending = adapter->tx_ring_count; } -static int igc_ethtool_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static int +igc_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 587db7483f25..b1e72ec5f131 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -55,7 +55,6 @@ enum igc_mac_type { enum igc_phy_type { igc_phy_unknown = 0, - igc_phy_none, igc_phy_i225, }; @@ -68,8 +67,6 @@ enum igc_media_type { enum igc_nvm_type { igc_nvm_unknown = 0, igc_nvm_eeprom_spi, - igc_nvm_flash_hw, - igc_nvm_invm, }; struct igc_info { diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index b2ef9fde97b3..66ea566488d1 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -473,13 +473,11 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw) /* NVM Function Pointers */ if (igc_get_flash_presence_i225(hw)) { - hw->nvm.type = igc_nvm_flash_hw; nvm->ops.read = igc_read_nvm_srrd_i225; nvm->ops.write = igc_write_nvm_srwr_i225; nvm->ops.validate = igc_validate_nvm_checksum_i225; nvm->ops.update = igc_update_nvm_checksum_i225; } else { - hw->nvm.type = igc_nvm_invm; nvm->ops.read = igc_read_nvm_eerd; nvm->ops.write = NULL; nvm->ops.validate = NULL; @@ -636,7 +634,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) ltrv = rd32(IGC_LTRMAXV); if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | - (scale_min << IGC_LTRMAXV_SCALE_SHIFT); + (scale_max << IGC_LTRMAXV_SCALE_SHIFT); wr32(IGC_LTRMAXV, ltrv); } } diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 8e448288ee26..2f17f36e94fd 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1718,24 +1718,26 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring, static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, struct igc_rx_buffer *rx_buffer, - union igc_adv_rx_desc *rx_desc, - unsigned int size) + struct xdp_buff *xdp) { - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); + unsigned int metasize = 
xdp->data - xdp->data_meta; struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(va - IGC_SKB_PAD, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, IGC_SKB_PAD); + skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, size); + if (metasize) + skb_metadata_set(skb, metasize); igc_rx_buffer_flip(rx_buffer, truesize); return skb; @@ -1746,6 +1748,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct xdp_buff *xdp, ktime_t timestamp) { + unsigned int metasize = xdp->data - xdp->data_meta; unsigned int size = xdp->data_end - xdp->data; unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size); void *va = xdp->data; @@ -1753,10 +1756,11 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, struct sk_buff *skb; /* prefetch first cache line of first page */ - net_prefetch(va); + net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IGC_RX_HDR_LEN + metasize); if (unlikely(!skb)) return NULL; @@ -1769,7 +1773,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, + ALIGN(headlen + metasize, sizeof(long))); + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } /* update all of the pointers */ size -= headlen; @@ -2231,7 +2241,7 @@ static int __igc_xdp_run_prog(struct igc_adapter *adapter, return IGC_XDP_REDIRECT; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -2354,7 +2364,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) if (!skb) { xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq); xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), - igc_rx_offset(rx_ring) + pkt_offset, size, false); + igc_rx_offset(rx_ring) + pkt_offset, + size, true); skb = igc_xdp_run_prog(adapter, &xdp); } @@ -2378,7 +2389,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) } else if (skb) igc_add_rx_frag(rx_ring, rx_buffer, skb, size); else if (ring_uses_build_skb(rx_ring)) - skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size); + skb = igc_build_skb(rx_ring, rx_buffer, &xdp); else skb = igc_construct_skb(rx_ring, rx_buffer, &xdp, timestamp); @@ -2448,8 +2459,10 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize); - if (metasize) + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } return skb; } @@ -5467,6 +5480,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -5510,6 +5526,9 @@ static irqreturn_t igc_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); 
} + if (icr & IGC_ICR_TS) + igc_tsync_interrupt(adapter); + napi_schedule(&q_vector->napi); return IRQ_HANDLED; diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 30568e3544cd..0d6e3215e98f 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -560,10 +560,6 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, struct hwtstamp_config *config) { - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: igc_ptp_disable_tx_timestamp(adapter); @@ -768,7 +764,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) */ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) { - return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false; + if (!IS_ENABLED(CONFIG_X86_TSC)) + return false; + + /* FIXME: it was noticed that enabling support for PCIe PTM in + * some i225-V models could cause lockups when bringing the + * interface up/down. There should be no downsides to + * disabling crosstimestamping support for i225-V, as it + * doesn't have any PTP support. That way we gain some time + * while root causing the issue. + */ + if (adapter->pdev->device == IGC_DEV_ID_I225_V) + return false; + + return pcie_ptm_enabled(adapter->pdev); } static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c index a8cf5374be47..aeeb34e64610 100644 --- a/drivers/net/ethernet/intel/igc/igc_xdp.c +++ b/drivers/net/ethernet/intel/igc/igc_xdp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020, Intel Corporation. 
*/ +#include <linux/if_vlan.h> #include <net/xdp_sock_drv.h> #include "igc.h" diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index 582099a5ad41..46efcfab7234 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -464,7 +464,9 @@ ixgb_get_drvinfo(struct net_device *netdev, static void ixgb_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_desc_ring *txdr = &adapter->tx_ring; @@ -478,7 +480,9 @@ ixgb_get_ringparam(struct net_device *netdev, static int ixgb_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_desc_ring *txdr = &adapter->tx_ring; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 8362822316a9..f70967c32116 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1118,7 +1118,9 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, } static void ixgbe_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; @@ -1131,7 +1133,9 @@ static void ixgbe_get_ringparam(struct net_device *netdev, } static int ixgbe_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *temp_ring; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0f9f022260d7..c6ff656b2476 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2170,7 +2170,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, net_prefetch(xdp->data_meta); /* build an skb to around the page buffer */ - skb = build_skb(xdp->data_hard_start, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; @@ -2235,7 +2235,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, result = IXGBE_XDP_REDIR; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) if (!speed && hw->mac.ops.get_link_capabilities) { ret = hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); + /* remove NBASE-T speeds from default autonegotiation + * to accommodate broken network switches in the field + * which cannot cope with advertised NBASE-T speeds + */ speed &= ~(IXGBE_LINK_SPEED_5GB_FULL | IXGBE_LINK_SPEED_2_5GB_FULL); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 23ddfd79fc8b..336426a67ac1 100644 --- 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -992,10 +992,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, bool is_l2 = false; u32 regval; - /* reserved for future extensions */ - if (config->flags) - return -EINVAL; - switch (config->tx_type) { case HWTSTAMP_TX_OFF: tsync_tx_ctl = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index a82533f21d36..bba3feaf3318 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -35,8 +35,6 @@ int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter, struct xsk_buff_pool *pool, u16 qid); -void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); - bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count); int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9724ffb16518..e4b50c7781ff 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); + /* set MDIO speed before talking to the PHY in case it's the 1st time */ + ixgbe_set_mdio_speed(hw); + /* PHY ops must be identified and initialized prior to reset */ status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index db2bc58dfcfd..b3fd8e5cd85b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -131,7 +131,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, goto out_failure; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 6bace746eaac..5f08779c0e4e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -281,6 +281,10 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ERR_INVALID_MAC_ADDR -1 #define IXGBE_ERR_RESET_FAILED -2 #define IXGBE_ERR_INVALID_ARGUMENT -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_MBX -5 +#define IXGBE_ERR_TIMEOUT -6 +#define IXGBE_ERR_PARAM -7 /* Transmit Config masks */ #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 8380f905e708..3b41f83c8dff 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -225,7 +225,9 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev, } static void ixgbevf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -236,7 +238,9 @@ static void ixgbevf_get_ringparam(struct net_device *netdev, } static int ixgbevf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct 
ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index e3e4676af9e4..e763cee0695e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -40,16 +40,16 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, spin_lock_bh(&adapter->mbx_lock); - ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); + ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); if (ret) goto out; - ret = hw->mbx.ops.read_posted(hw, msgbuf, 2); + ret = ixgbevf_poll_mbx(hw, msgbuf, 2); if (ret) goto out; ret = (int)msgbuf[1]; - if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0) + if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0) ret = -1; out: @@ -77,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa) spin_lock_bh(&adapter->mbx_lock); - err = hw->mbx.ops.write_posted(hw, msgbuf, 2); + err = ixgbevf_write_mbx(hw, msgbuf, 2); if (err) goto out; - err = hw->mbx.ops.read_posted(hw, msgbuf, 2); + err = ixgbevf_poll_mbx(hw, msgbuf, 2); if (err) goto out; @@ -623,6 +623,7 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) switch (adapter->hw.api_version) { case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: break; default: return; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index a0e325774819..e257390a4f6a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -430,6 +430,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info; extern const struct ixgbevf_info ixgbevf_X550_vf_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy; extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; @@ -491,4 +492,8 @@ void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); #define hw_dbg(hw, format, arg...) 
\ netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg) + +s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size); +s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size); + #endif /* _IXGBEVF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d81811ab4ec4..0015fcf1df2b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -944,7 +944,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring, net_prefetch(xdp->data_meta); /* build an skb around the page buffer */ - skb = build_skb(xdp->data_hard_start, truesize); + skb = napi_build_skb(xdp->data_hard_start, truesize); if (unlikely(!skb)) return NULL; @@ -1070,7 +1070,7 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, goto out_failure; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: out_failure: @@ -2266,6 +2266,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; static const int api[] = { + ixgbe_mbox_api_15, ixgbe_mbox_api_14, ixgbe_mbox_api_13, ixgbe_mbox_api_12, @@ -2284,6 +2285,12 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) idx++; } + if (hw->api_version >= ixgbe_mbox_api_15) { + hw->mbx.ops.init_params(hw); + memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, + sizeof(struct ixgbe_mbx_operations)); + } + spin_unlock_bh(&adapter->mbx_lock); } @@ -2627,6 +2634,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; @@ -4565,7 +4573,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; - memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, + memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy, sizeof(struct ixgbe_mbx_operations)); /* setup the private structure */ @@ -4625,6 +4633,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 6bc1953263b9..a55dd978f7ca 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -15,16 +15,15 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; + if (!countdown || !mbx->ops.check_for_msg) + return IXGBE_ERR_CONFIG; + while (countdown && mbx->ops.check_for_msg(hw)) { countdown--; udelay(mbx->udelay); } - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - - return countdown ? 0 : IXGBE_ERR_MBX; + return countdown ? 
0 : IXGBE_ERR_TIMEOUT; } /** @@ -38,87 +37,82 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; + if (!countdown || !mbx->ops.check_for_ack) + return IXGBE_ERR_CONFIG; + while (countdown && mbx->ops.check_for_ack(hw)) { countdown--; udelay(mbx->udelay); } - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; - - return countdown ? 0 : IXGBE_ERR_MBX; + return countdown ? 0 : IXGBE_ERR_TIMEOUT; } /** - * ixgbevf_read_posted_mbx - Wait for message notification and receive message - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer + * ixgbevf_read_mailbox_vf - read VF's mailbox register + * @hw: pointer to the HW structure * - * returns 0 if it successfully received a message notification and - * copied it into the receive buffer. + * This function is used to read the mailbox register dedicated for VF without + * losing the read to clear status bits. **/ -static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +static u32 ixgbevf_read_mailbox_vf(struct ixgbe_hw *hw) { - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; + u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); - if (!mbx->ops.read) - goto out; + vf_mailbox |= hw->mbx.vf_mailbox; + hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS; - ret_val = ixgbevf_poll_for_msg(hw); - - /* if ack received read message, otherwise we timed out */ - if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size); -out: - return ret_val; + return vf_mailbox; } /** - * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack - * @hw: pointer to the HW structure - * @msg: The message buffer - * @size: Length of buffer + * ixgbevf_clear_msg_vf - clear PF status bit + * @hw: pointer to the HW structure * - * returns 0 if it successfully copied message into the buffer and - * received an ack to that message within delay * timeout period + * This function is used to clear PFSTS bit in the VFMAILBOX register **/ -static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +static void ixgbevf_clear_msg_vf(struct ixgbe_hw *hw) { - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; + u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); - /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) - goto out; + if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) { + hw->mbx.stats.reqs++; + hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS; + } +} - /* send msg */ - ret_val = mbx->ops.write(hw, msg, size); +/** + * ixgbevf_clear_ack_vf - clear PF ACK bit + * @hw: pointer to the HW structure + * + * This function is used to clear PFACK bit in the VFMAILBOX register + **/ +static void ixgbevf_clear_ack_vf(struct ixgbe_hw *hw) +{ + u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); - /* if msg sent wait until we receive an ack */ - if (!ret_val) - ret_val = ixgbevf_poll_for_ack(hw); -out: - return ret_val; + if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) { + hw->mbx.stats.acks++; + hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK; + } } /** - * ixgbevf_read_v2p_mailbox - read v2p mailbox - * @hw: pointer to the HW structure + * ixgbevf_clear_rst_vf - clear PF reset bit + * @hw: pointer to the HW structure * - * This function is used to read the v2p mailbox without losing the read to - * clear status bits. 
+ * This function is used to clear reset indication and reset done bit in + * VFMAILBOX register after reset the shared resources and the reset sequence. **/ -static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw) +static void ixgbevf_clear_rst_vf(struct ixgbe_hw *hw) { - u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); - - v2p_mailbox |= hw->mbx.v2p_mailbox; - hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); - return v2p_mailbox; + if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) { + hw->mbx.stats.rsts++; + hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI | + IXGBE_VFMAILBOX_RSTD); + } } /** @@ -131,14 +125,12 @@ static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw) **/ static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) { - u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw); + u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw); s32 ret_val = IXGBE_ERR_MBX; - if (v2p_mailbox & mask) + if (vf_mailbox & mask) ret_val = 0; - hw->mbx.v2p_mailbox &= ~mask; - return ret_val; } @@ -172,6 +164,7 @@ static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw) if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { ret_val = 0; + ixgbevf_clear_ack_vf(hw); hw->mbx.stats.acks++; } @@ -191,6 +184,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | IXGBE_VFMAILBOX_RSTI))) { ret_val = 0; + ixgbevf_clear_rst_vf(hw); hw->mbx.stats.rsts++; } @@ -205,19 +199,59 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) **/ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) { - s32 ret_val = IXGBE_ERR_MBX; + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_CONFIG; + int countdown = mbx->timeout; + u32 vf_mailbox; - /* Take ownership of the buffer */ - IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); + if (!mbx->timeout) + return ret_val; - /* reserve mailbox for VF use */ - if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) - ret_val = 0; + while (countdown--) { + /* Reserve mailbox for VF use */ + vf_mailbox = ixgbevf_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_VFU; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* Verify that VF is the owner of the lock */ + if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) { + ret_val = 0; + break; + } + + /* Wait a bit before trying again */ + udelay(mbx->udelay); + } + + if (ret_val) + ret_val = IXGBE_ERR_TIMEOUT; return ret_val; } /** + * ixgbevf_release_mbx_lock_vf - release mailbox lock + * @hw: pointer to the HW structure + **/ +static void ixgbevf_release_mbx_lock_vf(struct ixgbe_hw *hw) +{ + u32 vf_mailbox; + + /* Return ownership of the buffer */ + vf_mailbox = ixgbevf_read_mailbox_vf(hw); + vf_mailbox &= ~IXGBE_VFMAILBOX_VFU; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); +} + +/** + * ixgbevf_release_mbx_lock_vf_legacy - release mailbox lock + * @hw: pointer to the HW structure + **/ +static void ixgbevf_release_mbx_lock_vf_legacy(struct ixgbe_hw *__always_unused hw) +{ +} + +/** * ixgbevf_write_mbx_vf - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer @@ -227,6 +261,50 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) **/ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) { + u32 vf_mailbox; + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent PF/VF race condition */ + ret_val = ixgbevf_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* 
flush msg and acks as we are overwriting the message buffer */ + ixgbevf_clear_msg_vf(hw); + ixgbevf_clear_ack_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* interrupt the PF to tell it a message has been sent */ + vf_mailbox = ixgbevf_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_REQ; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* if msg sent wait until we receive an ack */ + ret_val = ixgbevf_poll_for_ack(hw); + +out_no_write: + hw->mbx.ops.release(hw); + + return ret_val; +} + +/** + * ixgbevf_write_mbx_vf_legacy - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully copied message into the buffer + **/ +static s32 ixgbevf_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ s32 ret_val; u16 i; @@ -237,7 +315,9 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) /* flush msg and acks as we are overwriting the message buffer */ ixgbevf_check_for_msg_vf(hw); + ixgbevf_clear_msg_vf(hw); ixgbevf_check_for_ack_vf(hw); + ixgbevf_clear_ack_vf(hw); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) @@ -263,6 +343,42 @@ out_no_write: **/ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) { + u32 vf_mailbox; + s32 ret_val; + u16 i; + + /* check if there is a message from PF */ + ret_val = ixgbevf_check_for_msg_vf(hw); + if (ret_val) + return ret_val; + + ixgbevf_clear_msg_vf(hw); + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt */ + vf_mailbox = ixgbevf_read_mailbox_vf(hw); + vf_mailbox |= IXGBE_VFMAILBOX_ACK; + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return ret_val; +} + +/** + * ixgbevf_read_mbx_vf_legacy - Reads a message from the inbox intended for VF + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully read message from buffer + **/ +static s32 ixgbevf_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ s32 ret_val = 0; u16 i; @@ -298,7 +414,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) /* start mailbox as timed out and let the reset_hw call set the timeout * value to begin communications */ - mbx->timeout = 0; + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; mbx->size = IXGBE_VFMAILBOX_SIZE; @@ -312,12 +428,79 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) return 0; } +/** + * ixgbevf_poll_mbx - Wait for message and read it from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully read message from buffer + **/ +s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_CONFIG; + + if (!mbx->ops.read || !mbx->ops.check_for_msg || !mbx->timeout) + return ret_val; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + ret_val = ixgbevf_poll_for_msg(hw); + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); 
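The new locked transmit path above boils down to a fixed sequence: take the VFU lock, flush stale message/ACK state, copy the words, raise REQ, wait for the PF's ACK, drop the lock. A condensed, runnable model of that sequence follows; every hardware access is a stub and all names are invented, so treat it as a sketch rather than the driver code.

#include <stdint.h>
#include <stdio.h>

static int  lock(void)         { puts("take VFU lock");            return 0; }
static void unlock(void)       { puts("drop VFU lock"); }
static void clear_stale(void)  { puts("flush stale msg/ack bits"); }
static void write_word(int i, uint32_t w) { printf("mem[%d] = %u\n", i, (unsigned)w); }
static void raise_req(void)    { puts("set REQ: interrupt the PF"); }
static int  poll_for_ack(void) { puts("poll PFACK until timeout");  return 0; }

static int mbx_send(const uint32_t *msg, int n)
{
	int err = lock();		/* serialize against the PF side */
	if (err)
		return err;
	clear_stale();			/* the buffer is about to be reused */
	for (int i = 0; i < n; i++)
		write_word(i, msg[i]);
	raise_req();
	err = poll_for_ack();		/* bounded by timeout * udelay */
	unlock();			/* dropped on success and on failure */
	return err;
}

int main(void)
{
	uint32_t msg[1] = { 1 };

	return mbx_send(msg, 1);
}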
+ + return ret_val; +} + +/** + * ixgbevf_write_mbx - Write a message to the mailbox and wait for ACK + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully copied message into the buffer and + * received an ACK to that message within specified period + **/ +s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_CONFIG; + + /** + * exit if either we can't write, release + * or there is no timeout defined + */ + if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release || + !mbx->timeout) + return ret_val; + + if (size > mbx->size) + ret_val = IXGBE_ERR_PARAM; + else + ret_val = mbx->ops.write(hw, msg, size); + + return ret_val; +} + const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { .init_params = ixgbevf_init_mbx_params_vf, + .release = ixgbevf_release_mbx_lock_vf, .read = ixgbevf_read_mbx_vf, .write = ixgbevf_write_mbx_vf, - .read_posted = ixgbevf_read_posted_mbx, - .write_posted = ixgbevf_write_posted_mbx, + .check_for_msg = ixgbevf_check_for_msg_vf, + .check_for_ack = ixgbevf_check_for_ack_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, +}; + +const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = { + .init_params = ixgbevf_init_mbx_params_vf, + .release = ixgbevf_release_mbx_lock_vf_legacy, + .read = ixgbevf_read_mbx_vf_legacy, + .write = ixgbevf_write_mbx_vf_legacy, .check_for_msg = ixgbevf_check_for_msg_vf, .check_for_ack = ixgbevf_check_for_ack_vf, .check_for_rst = ixgbevf_check_for_rst_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 853796c8ef0e..7346ccf014a5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -7,7 +7,6 @@ #include "vf.h" #define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 #define IXGBE_VFMAILBOX 0x002FC #define IXGBE_VFMBMEM 0x00200 @@ -39,14 +38,17 @@ /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is IXGBE_PF_*. 
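ixgbevf_write_mbx() above now validates the whole ops table and the timeout up front instead of failing deep inside the old posted helpers. A simplified stand-alone model of that dispatch pattern, with invented types and error values standing in for IXGBE_ERR_CONFIG and IXGBE_ERR_PARAM:

#include <stdint.h>

struct mbx_ops {
	int (*write)(uint32_t *msg, uint16_t size);
	int (*check_for_ack)(void);
	void (*release)(void);
};

struct mbx {
	struct mbx_ops ops;
	uint32_t timeout;
	uint16_t size;
};

static int mbx_write(struct mbx *mbx, uint32_t *msg, uint16_t size)
{
	/* Refuse to run with a half-populated ops table or a zero timeout,
	 * instead of failing partway through the transaction. */
	if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release ||
	    !mbx->timeout)
		return -1;		/* stands in for IXGBE_ERR_CONFIG */

	if (size > mbx->size)
		return -2;		/* stands in for IXGBE_ERR_PARAM */

	return mbx->ops.write(msg, size);
}

static int  stub_write(uint32_t *msg, uint16_t size) { (void)msg; (void)size; return 0; }
static int  stub_ack(void) { return 0; }
static void stub_rel(void) { }

int main(void)
{
	struct mbx mbx = {
		.ops = { .write = stub_write, .check_for_ack = stub_ack,
			 .release = stub_rel },
		.timeout = 1000, .size = 16,
	};
	uint32_t msg[1] = { 0 };

	return mbx_write(&mbx, msg, 1);
}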
- * Message ACK's are the value or'd with 0xF0000000 + * Message results are the value or'd with 0xF0000000 */ -/* Messages below or'd with this are the ACK */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 -/* Messages below or'd with this are the NACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 -/* Indicates that VF is still clear to send requests */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 +#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this + * have succeeded + */ +#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this + * have failed + */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + * clear to send requests + */ #define IXGBE_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for extra info for certain messages */ #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) @@ -63,6 +65,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ + ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d459f5c8e98f..61d8970c6d1d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -13,13 +13,12 @@ static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, u32 *retmsg, u16 size) { - struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 retval = mbx->ops.write_posted(hw, msg, size); + s32 retval = ixgbevf_write_mbx(hw, msg, size); if (retval) return retval; - return mbx->ops.read_posted(hw, retmsg, size); + return ixgbevf_poll_mbx(hw, retmsg, size); } /** @@ -75,6 +74,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) /* reset the api version */ hw->api_version = ixgbe_mbox_api_10; + hw->mbx.ops.init_params(hw); + memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy, + sizeof(struct ixgbe_mbx_operations)); IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); IXGBE_WRITE_FLUSH(hw); @@ -92,7 +94,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; msgbuf[0] = IXGBE_VF_RESET; - mbx->ops.write_posted(hw, msgbuf, 1); + ixgbevf_write_mbx(hw, msgbuf, 1); mdelay(10); @@ -100,7 +102,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) * also set up the mc_filter_type which is piggy backed * on the mac address in word 3 */ - ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); + ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); if (ret_val) return ret_val; @@ -108,11 +110,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) * to indicate that no MAC address has yet been assigned for * the VF. 
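The reset path above deliberately re-initializes the mailbox parameters and copies the legacy ops table over the live one, since a freshly reset VF can only assume the oldest protocol; the richer table is presumably restored elsewhere once a newer API is negotiated with the PF. The pattern in isolation, as a hypothetical sketch with invented names:

#include <string.h>

struct ops { int (*talk)(void); };

static int talk_legacy(void) { return 0; }
static int talk_v15(void)    { return 0; }

static const struct ops ops_legacy = { .talk = talk_legacy };
static const struct ops ops_v15    = { .talk = talk_v15 };

struct hw { struct ops ops; int api; };

static void hw_reset(struct hw *hw)
{
	/* a reset always falls back to the lowest common denominator */
	memcpy(&hw->ops, &ops_legacy, sizeof(hw->ops));
	hw->api = 10;
}

static void hw_negotiate(struct hw *hw, int api)
{
	/* only move to the richer table once the PF has agreed */
	if (api >= 15) {
		memcpy(&hw->ops, &ops_v15, sizeof(hw->ops));
		hw->api = api;
	}
}

int main(void)
{
	struct hw hw;

	hw_reset(&hw);
	hw_negotiate(&hw, 15);
	return hw.ops.talk();
}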
*/ - if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && - msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) + if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) && + msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE)) return IXGBE_ERR_INVALID_MAC_ADDR; - if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) + if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS)) ether_addr_copy(hw->mac.perm_addr, addr); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; @@ -269,7 +271,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) if (!ret_val) { msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) + if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE)) return -ENOMEM; } @@ -311,6 +313,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * is not supported for this device type. */ switch (hw->api_version) { + case ixgbe_mbox_api_15: case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: @@ -323,12 +326,12 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) msgbuf[0] = IXGBE_VF_GET_RETA; - err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + err = ixgbevf_write_mbx(hw, msgbuf, 1); if (err) return err; - err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1); + err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1); if (err) return err; @@ -336,14 +339,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* If the operation has been refused by a PF return -EPERM */ - if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) + if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE)) return -EPERM; /* If we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such. */ - if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK)) + if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS)) return IXGBE_ERR_MBX; /* ixgbevf doesn't support more than 2 queues at the moment */ @@ -379,6 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * or if the operation is not supported for this device type. */ switch (hw->api_version) { + case ixgbe_mbox_api_15: case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: @@ -390,12 +394,12 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) } msgbuf[0] = IXGBE_VF_GET_RSS_KEY; - err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + err = ixgbevf_write_mbx(hw, msgbuf, 1); if (err) return err; - err = hw->mbx.ops.read_posted(hw, msgbuf, 11); + err = ixgbevf_poll_mbx(hw, msgbuf, 11); if (err) return err; @@ -403,14 +407,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* If the operation has been refused by a PF return -EPERM */ - if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK)) + if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE)) return -EPERM; /* If we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such. 
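Every request/reply exchange above follows the same decode: mask off the CTS bit, then compare the reply word against opcode | SUCCESS and opcode | FAILURE. A self-contained sketch of that check; the opcode value here is an example, not taken from mbx.h.

#include <stdint.h>
#include <stdio.h>

#define VT_MSGTYPE_SUCCESS 0x80000000u
#define VT_MSGTYPE_FAILURE 0x40000000u
#define VF_RESET           0x00000001u	/* example opcode only */

static int check_reply(uint32_t reply, uint32_t opcode)
{
	if (reply == (opcode | VT_MSGTYPE_SUCCESS))
		return 0;	/* the PF accepted the request */
	if (reply == (opcode | VT_MSGTYPE_FAILURE))
		return -1;	/* the PF explicitly refused */
	return -2;		/* anything else is a mailbox error */
}

int main(void)
{
	printf("%d\n", check_reply(VF_RESET | VT_MSGTYPE_SUCCESS, VF_RESET));
	return 0;
}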
*/ - if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK)) + if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS)) return IXGBE_ERR_MBX; memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE); @@ -442,7 +446,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, /* if nacked the address was rejected, use "perm_addr" */ if (!ret_val && - (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) { + (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) { ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); return IXGBE_ERR_MBX; } @@ -545,8 +549,9 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; fallthrough; - case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: break; default: return -EOPNOTSUPP; @@ -561,7 +566,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) return err; msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) + if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE)) return -EPERM; return 0; @@ -606,7 +611,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT); - if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK)) + if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS)) err = IXGBE_ERR_INVALID_ARGUMENT; mbx_err: @@ -705,12 +710,15 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, /* if the read failed it could just be a mailbox collision, best wait * until we are called again and don't report an error */ - if (mbx->ops.read(hw, &in_msg, 1)) + if (mbx->ops.read(hw, &in_msg, 1)) { + if (hw->api_version >= ixgbe_mbox_api_15) + mac->get_link_status = false; goto out; + } if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { /* msg is not CTS and is NACK we must have lost CTS status */ - if (in_msg & IXGBE_VT_MSGTYPE_NACK) + if (in_msg & IXGBE_VT_MSGTYPE_FAILURE) ret_val = -1; goto out; } @@ -816,7 +824,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) if (ret_val) return ret_val; if ((msgbuf[0] & IXGBE_VF_SET_LPE) && - (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) + (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) return IXGBE_ERR_MBX; return 0; @@ -863,7 +871,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* Store value and return 0 on success */ - if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { + if (msg[0] == (IXGBE_VF_API_NEGOTIATE | + IXGBE_VT_MSGTYPE_SUCCESS)) { hw->api_version = api; return 0; } @@ -901,6 +910,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: + case ixgbe_mbox_api_15: break; default: return 0; @@ -918,7 +928,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, * some sort of mailbox error so we should treat it * as such */ - if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK)) + if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS)) return IXGBE_ERR_MBX; /* record and validate values from message */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 1d8209df4162..54158dac8707 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -73,10 +73,9 
@@ struct ixgbe_mac_info { struct ixgbe_mbx_operations { s32 (*init_params)(struct ixgbe_hw *hw); + void (*release)(struct ixgbe_hw *hw); s32 (*read)(struct ixgbe_hw *, u32 *, u16); s32 (*write)(struct ixgbe_hw *, u32 *, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16); s32 (*check_for_msg)(struct ixgbe_hw *); s32 (*check_for_ack)(struct ixgbe_hw *); s32 (*check_for_rst)(struct ixgbe_hw *); @@ -96,7 +95,7 @@ struct ixgbe_mbx_info { struct ixgbe_mbx_stats stats; u32 timeout; u32 udelay; - u32 v2p_mailbox; + u32 vf_mailbox; u16 size; }; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6433c909c6b2..9b6fa27b7daf 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -25,6 +25,7 @@ #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/module.h> +#include <linux/property.h> #include <asm/checksum.h> @@ -64,8 +65,8 @@ /* use 2 static channels for TX/RX */ #define LTQ_ETOP_TX_CHANNEL 1 #define LTQ_ETOP_RX_CHANNEL 6 -#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL) -#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL) +#define IS_TX(x) ((x) == LTQ_ETOP_TX_CHANNEL) +#define IS_RX(x) ((x) == LTQ_ETOP_RX_CHANNEL) #define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x)) #define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y)) @@ -110,9 +111,9 @@ ltq_etop_alloc_skb(struct ltq_etop_chan *ch) ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN); if (!ch->skb[ch->dma.desc]) return -ENOMEM; - ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev, - ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, - DMA_FROM_DEVICE); + ch->dma.desc_base[ch->dma.desc].addr = + dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data, + MAX_DMA_DATA_LEN, DMA_FROM_DEVICE); ch->dma.desc_base[ch->dma.desc].addr = CPHYSADDR(ch->skb[ch->dma.desc]->data); ch->dma.desc_base[ch->dma.desc].ctl = @@ -134,7 +135,7 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch) spin_lock_irqsave(&priv->lock, flags); if (ltq_etop_alloc_skb(ch)) { netdev_err(ch->netdev, - "failed to allocate new rx buffer, stopping DMA\n"); + "failed to allocate new rx buffer, stopping DMA\n"); ltq_dma_close(&ch->dma); } ch->dma.desc++; @@ -184,7 +185,7 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget) dev_kfree_skb_any(ch->skb[ch->tx_free]); ch->skb[ch->tx_free] = NULL; memset(&ch->dma.desc_base[ch->tx_free], 0, - sizeof(struct ltq_dma_desc)); + sizeof(struct ltq_dma_desc)); ch->tx_free++; ch->tx_free %= LTQ_DESC_NUM; } @@ -217,6 +218,7 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) free_irq(ch->dma.irq, priv); if (IS_RX(ch->idx)) { int desc; + for (desc = 0; desc < LTQ_DESC_NUM; desc++) dev_kfree_skb_any(ch->skb[ch->dma.desc]); } @@ -239,23 +241,24 @@ ltq_etop_hw_init(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); int i; + int err; ltq_pmu_enable(PMU_PPE); switch (priv->pldata->mii_mode) { case PHY_INTERFACE_MODE_RMII: - ltq_etop_w32_mask(ETOP_MII_MASK, - ETOP_MII_REVERSE, LTQ_ETOP_CFG); + ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_REVERSE, + LTQ_ETOP_CFG); break; case PHY_INTERFACE_MODE_MII: - ltq_etop_w32_mask(ETOP_MII_MASK, - ETOP_MII_NORMAL, LTQ_ETOP_CFG); + ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_NORMAL, + LTQ_ETOP_CFG); break; default: netdev_err(dev, "unknown mii mode %d\n", - priv->pldata->mii_mode); + priv->pldata->mii_mode); return -ENOTSUPP; } @@ -268,12 +271,19 @@ ltq_etop_hw_init(struct net_device *dev) int irq = 
LTQ_DMA_CH0_INT + i; struct ltq_etop_chan *ch = &priv->ch[i]; - ch->idx = ch->dma.nr = i; + ch->dma.nr = i; + ch->idx = ch->dma.nr; ch->dma.dev = &priv->pdev->dev; if (IS_TX(i)) { ltq_dma_alloc_tx(&ch->dma); - request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); + err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); + if (err) { + netdev_err(dev, + "Unable to get Tx DMA IRQ %d\n", + irq); + return err; + } } else if (IS_RX(i)) { ltq_dma_alloc_rx(&ch->dma); for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; @@ -281,7 +291,13 @@ ltq_etop_hw_init(struct net_device *dev) if (ltq_etop_alloc_skb(ch)) return -ENOMEM; ch->dma.desc = 0; - request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); + err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); + if (err) { + netdev_err(dev, + "Unable to get Rx DMA IRQ %d\n", + irq); + return err; + } } ch->dma.irq = irq; } @@ -291,9 +307,9 @@ ltq_etop_hw_init(struct net_device *dev) static void ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver)); - strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strscpy(info->driver, "Lantiq ETOP", sizeof(info->driver)); + strscpy(info->bus_info, "internal", sizeof(info->bus_info)); + strscpy(info->version, DRV_VERSION, sizeof(info->version)); } static const struct ethtool_ops ltq_etop_ethtool_ops = { @@ -385,7 +401,7 @@ ltq_etop_mdio_init(struct net_device *dev) priv->mii_bus->write = ltq_etop_mdio_wr; priv->mii_bus->name = "ltq_mii"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - priv->pdev->name, priv->pdev->id); + priv->pdev->name, priv->pdev->id); if (mdiobus_register(priv->mii_bus)) { err = -ENXIO; goto err_out_free_mdiobus; @@ -482,8 +498,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) netif_trans_update(dev); spin_lock_irqsave(&priv->lock, flags); - desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len, + desc->addr = ((unsigned int)dma_map_single(&priv->pdev->dev, skb->data, len, DMA_TO_DEVICE)) - byte_offset; + /* Make sure the address is written before we give it to HW */ wmb(); desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); @@ -525,7 +542,7 @@ ltq_etop_set_mac_address(struct net_device *dev, void *p) spin_lock_irqsave(&priv->lock, flags); ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0); ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16, - LTQ_ETOP_MAC_DA1); + LTQ_ETOP_MAC_DA1); spin_unlock_irqrestore(&priv->lock, flags); } return ret; @@ -638,15 +655,15 @@ ltq_etop_probe(struct platform_device *pdev) } res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), dev_name(&pdev->dev)); + resource_size(res), dev_name(&pdev->dev)); if (!res) { dev_err(&pdev->dev, "failed to request etop resource\n"); err = -EBUSY; goto err_out; } - ltq_etop_membase = devm_ioremap(&pdev->dev, - res->start, resource_size(res)); + ltq_etop_membase = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); if (!ltq_etop_membase) { dev_err(&pdev->dev, "failed to remap etop engine %d\n", pdev->id); @@ -673,22 +690,22 @@ ltq_etop_probe(struct platform_device *pdev) err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len); if (err < 0) { dev_err(&pdev->dev, "unable to read tx-burst-length property\n"); - return err; + goto err_free; } err = device_property_read_u32(&pdev->dev, 
"lantiq,rx-burst-length", &priv->rx_burst_len); if (err < 0) { dev_err(&pdev->dev, "unable to read rx-burst-length property\n"); - return err; + goto err_free; } for (i = 0; i < MAX_DMA_CHAN; i++) { if (IS_TX(i)) netif_napi_add(dev, &priv->ch[i].napi, - ltq_etop_poll_tx, 8); + ltq_etop_poll_tx, 8); else if (IS_RX(i)) netif_napi_add(dev, &priv->ch[i].napi, - ltq_etop_poll_rx, 32); + ltq_etop_poll_rx, 32); priv->ch[i].netdev = dev; } @@ -726,7 +743,7 @@ static struct platform_driver ltq_mii_driver = { }, }; -int __init +static int __init init_ltq_etop(void) { int ret = platform_driver_probe(<q_mii_driver, ltq_etop_probe); diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 0da09ea81980..41d11137cde0 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -27,6 +27,9 @@ #define XRX200_DMA_TX 1 #define XRX200_DMA_BURST_LEN 8 +#define XRX200_DMA_PACKET_COMPLETE 0 +#define XRX200_DMA_PACKET_IN_PROGRESS 1 + /* cpu port mac */ #define PMAC_RX_IPG 0x0024 #define PMAC_RX_IPG_MASK 0xf @@ -60,7 +63,14 @@ struct xrx200_chan { struct napi_struct napi; struct ltq_dma_channel dma; - struct sk_buff *skb[LTQ_DESC_NUM]; + + union { + struct sk_buff *skb[LTQ_DESC_NUM]; + void *rx_buff[LTQ_DESC_NUM]; + }; + + struct sk_buff *skb_head; + struct sk_buff *skb_tail; struct xrx200_priv *priv; }; @@ -71,6 +81,9 @@ struct xrx200_priv { struct xrx200_chan chan_tx; struct xrx200_chan chan_rx; + u16 rx_buf_size; + u16 rx_skb_size; + struct net_device *net_dev; struct device *dev; @@ -97,6 +110,22 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set, xrx200_pmac_w32(priv, val, offset); } +static int xrx200_max_frame_len(int mtu) +{ + return VLAN_ETH_HLEN + mtu; +} + +static int xrx200_buffer_size(int mtu) +{ + return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN); +} + +static int xrx200_skb_size(u16 buf_size) +{ + return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + /* drop all the packets from the DMA ring */ static void xrx200_flush_dma(struct xrx200_chan *ch) { @@ -109,8 +138,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch) break; desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - (ch->priv->net_dev->mtu + VLAN_ETH_HLEN + - ETH_FCS_LEN); + ch->priv->rx_buf_size; ch->dma.desc++; ch->dma.desc %= LTQ_DESC_NUM; } @@ -156,35 +184,34 @@ static int xrx200_close(struct net_device *net_dev) return 0; } -static int xrx200_alloc_skb(struct xrx200_chan *ch) +static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size)) { - int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; - struct sk_buff *skb = ch->skb[ch->dma.desc]; + void *buf = ch->rx_buff[ch->dma.desc]; + struct xrx200_priv *priv = ch->priv; dma_addr_t mapping; int ret = 0; - ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev, - len); - if (!ch->skb[ch->dma.desc]) { + ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size); + if (!ch->rx_buff[ch->dma.desc]) { ret = -ENOMEM; goto skip; } - mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data, - len, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) { - dev_kfree_skb_any(ch->skb[ch->dma.desc]); - ch->skb[ch->dma.desc] = skb; + mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc], + priv->rx_buf_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, mapping))) { + skb_free_frag(ch->rx_buff[ch->dma.desc]); + 
ch->rx_buff[ch->dma.desc] = buf; ret = -ENOMEM; goto skip; } - ch->dma.desc_base[ch->dma.desc].addr = mapping; + ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN; /* Make sure the address is written before we give it to HW */ wmb(); skip: ch->dma.desc_base[ch->dma.desc].ctl = - LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len; + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size; return ret; } @@ -193,12 +220,14 @@ static int xrx200_hw_receive(struct xrx200_chan *ch) { struct xrx200_priv *priv = ch->priv; struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; - struct sk_buff *skb = ch->skb[ch->dma.desc]; - int len = (desc->ctl & LTQ_DMA_SIZE_MASK); + void *buf = ch->rx_buff[ch->dma.desc]; + u32 ctl = desc->ctl; + int len = (ctl & LTQ_DMA_SIZE_MASK); struct net_device *net_dev = priv->net_dev; + struct sk_buff *skb; int ret; - ret = xrx200_alloc_skb(ch); + ret = xrx200_alloc_buf(ch, napi_alloc_frag); ch->dma.desc++; ch->dma.desc %= LTQ_DESC_NUM; @@ -209,13 +238,39 @@ static int xrx200_hw_receive(struct xrx200_chan *ch) return ret; } + skb = build_skb(buf, priv->rx_skb_size); + skb_reserve(skb, NET_SKB_PAD); skb_put(skb, len); - skb->protocol = eth_type_trans(skb, net_dev); - netif_receive_skb(skb); - net_dev->stats.rx_packets++; - net_dev->stats.rx_bytes += len - ETH_FCS_LEN; - return 0; + /* add buffers to skb via skb->frag_list */ + if (ctl & LTQ_DMA_SOP) { + ch->skb_head = skb; + ch->skb_tail = skb; + skb_reserve(skb, NET_IP_ALIGN); + } else if (ch->skb_head) { + if (ch->skb_head == ch->skb_tail) + skb_shinfo(ch->skb_tail)->frag_list = skb; + else + ch->skb_tail->next = skb; + ch->skb_tail = skb; + ch->skb_head->len += skb->len; + ch->skb_head->data_len += skb->len; + ch->skb_head->truesize += skb->truesize; + } + + if (ctl & LTQ_DMA_EOP) { + ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev); + netif_receive_skb(ch->skb_head); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += ch->skb_head->len; + ch->skb_head = NULL; + ch->skb_tail = NULL; + ret = XRX200_DMA_PACKET_COMPLETE; + } else { + ret = XRX200_DMA_PACKET_IN_PROGRESS; + } + + return ret; } static int xrx200_poll_rx(struct napi_struct *napi, int budget) @@ -230,7 +285,9 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget) if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { ret = xrx200_hw_receive(ch); - if (ret) + if (ret == XRX200_DMA_PACKET_IN_PROGRESS) + continue; + if (ret != XRX200_DMA_PACKET_COMPLETE) return ret; rx++; } else { @@ -351,11 +408,13 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu) struct xrx200_chan *ch_rx = &priv->chan_rx; int old_mtu = net_dev->mtu; bool running = false; - struct sk_buff *skb; + void *buff; int curr_desc; int ret = 0; net_dev->mtu = new_mtu; + priv->rx_buf_size = xrx200_buffer_size(new_mtu); + priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size); if (new_mtu <= old_mtu) return ret; @@ -371,13 +430,15 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu) for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM; ch_rx->dma.desc++) { - skb = ch_rx->skb[ch_rx->dma.desc]; - ret = xrx200_alloc_skb(ch_rx); + buff = ch_rx->rx_buff[ch_rx->dma.desc]; + ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag); if (ret) { net_dev->mtu = old_mtu; + priv->rx_buf_size = xrx200_buffer_size(old_mtu); + priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size); break; } - dev_kfree_skb_any(skb); + skb_free_frag(buff); } ch_rx->dma.desc = curr_desc; @@ -430,7 +491,7 @@ static int xrx200_dma_init(struct 
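The SOP/EOP handling above chains multi-descriptor frames onto the head skb via frag_list: the second buffer hangs off frag_list, later ones off the tail's next pointer, and the head accumulates the total length. A simplified, runnable model of that chaining logic, with a toy skb type standing in for struct sk_buff:

#include <stdbool.h>
#include <stddef.h>

struct skb {
	struct skb *frag_list;	/* first continuation buffer */
	struct skb *next;	/* later continuation buffers */
	int len;
};

struct chan {
	struct skb *head, *tail;
};

/* Returns the finished frame on EOP, NULL while one is still in progress. */
static struct skb *rx_chain(struct chan *ch, struct skb *skb, bool sop, bool eop)
{
	if (sop) {
		ch->head = ch->tail = skb;	/* first buffer of a frame */
	} else if (ch->head) {
		if (ch->head == ch->tail)
			ch->head->frag_list = skb;	/* second buffer */
		else
			ch->tail->next = skb;		/* third and later */
		ch->tail = skb;
		ch->head->len += skb->len;	/* head tracks total length */
	}

	if (eop) {
		struct skb *done = ch->head;

		ch->head = ch->tail = NULL;
		return done;
	}
	return NULL;
}

int main(void)
{
	struct chan ch = { NULL, NULL };
	struct skb a = { NULL, NULL, 64 }, b = { NULL, NULL, 32 };

	rx_chain(&ch, &a, true, false);				/* SOP */
	return rx_chain(&ch, &b, false, true) == &a ? 0 : 1;	/* EOP */
}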
xrx200_priv *priv) ltq_dma_alloc_rx(&ch_rx->dma); for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM; ch_rx->dma.desc++) { - ret = xrx200_alloc_skb(ch_rx); + ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag); if (ret) goto rx_free; } @@ -465,7 +526,7 @@ rx_ring_free: /* free the allocated RX ring */ for (i = 0; i < LTQ_DESC_NUM; i++) { if (priv->chan_rx.skb[i]) - dev_kfree_skb_any(priv->chan_rx.skb[i]); + skb_free_frag(priv->chan_rx.rx_buff[i]); } rx_free: @@ -482,7 +543,7 @@ static void xrx200_hw_cleanup(struct xrx200_priv *priv) /* free the allocated RX ring */ for (i = 0; i < LTQ_DESC_NUM; i++) - dev_kfree_skb_any(priv->chan_rx.skb[i]); + skb_free_frag(priv->chan_rx.rx_buff[i]); } static int xrx200_probe(struct platform_device *pdev) @@ -505,7 +566,9 @@ static int xrx200_probe(struct platform_device *pdev) net_dev->netdev_ops = &xrx200_netdev_ops; SET_NETDEV_DEV(net_dev, dev); net_dev->min_mtu = ETH_ZLEN; - net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN; + net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0); + priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN); + priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size); /* load the memory ranges */ priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); @@ -550,8 +613,10 @@ static int xrx200_probe(struct platform_device *pdev) PMAC_HD_CTL); /* setup NAPI */ - netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32); - netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32); + netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, + NAPI_POLL_WEIGHT); + netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, + NAPI_POLL_WEIGHT); platform_set_drvdata(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index bb14fa2241a3..105247582684 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1638,7 +1638,9 @@ static int mv643xx_eth_set_coalesce(struct net_device *dev, } static void -mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er, + struct kernel_ethtool_ringparam *kernel_er, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -1650,7 +1652,9 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) } static int -mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er, + struct kernel_ethtool_ringparam *kernel_er, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -3197,7 +3201,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) dev->hw_features = dev->features; dev->priv_flags |= IFF_UNICAST_FLT; - dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; + netif_set_gso_max_segs(dev, MV643XX_MAX_TSO_SEGS); /* MTU range: 64 - 9500 */ dev->min_mtu = 64; diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 62a97c46fba0..ef878973b859 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -429,12 +429,14 @@ static const struct of_device_id orion_mdio_match[] = { }; MODULE_DEVICE_TABLE(of, orion_mdio_match); +#ifdef CONFIG_ACPI static const struct acpi_device_id orion_mdio_acpi_match[] = { { "MRVL0100", BUS_TYPE_SMI }, { "MRVL0101", 
BUS_TYPE_XSMI }, { }, }; MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match); +#endif static struct platform_driver orion_mdio_driver = { .probe = orion_mdio_probe, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 5a7bdca22a63..83c8908f0cc7 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -38,6 +38,7 @@ #include <net/ipv6.h> #include <net/tso.h> #include <net/page_pool.h> +#include <net/pkt_cls.h> #include <linux/bpf_trace.h> /* Registers */ @@ -247,12 +248,39 @@ #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 #define MVNETA_PORT_TX_RESET 0x3cf0 #define MVNETA_PORT_TX_DMA_RESET BIT(0) +#define MVNETA_TXQ_CMD1_REG 0x3e00 +#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3) +#define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0) +#define MVNETA_REFILL_NUM_CLK_REG 0x3e08 +#define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff #define MVNETA_TX_MTU 0x3e0c #define MVNETA_TX_TOKEN_SIZE 0x3e14 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff +#define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2)) +#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000 +#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20 +#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff +/* The values of the bucket refill base period and refill period are taken from + * the reference manual, and add up to a base resolution of 10Kbps. This allows + * covering all rate-limit values from 10Kbps up to 5Gbps + */ + +/* Base period for the rate limit algorithm */ +#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100 + +/* Number of Base Period to wait between each bucket refill */ +#define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000 + +/* The base resolution for rate limiting, in bps. Any max_rate value should be + * a multiple of that value. + */ +#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \ + (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \ + MVNETA_TXQ_BUCKET_REFILL_PERIOD)) + #define MVNETA_LPI_CTRL_0 0x2cc0 #define MVNETA_LPI_CTRL_1 0x2cc4 #define MVNETA_LPI_REQUEST_ENABLE BIT(0) @@ -492,13 +520,13 @@ struct mvneta_port { u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; - u8 prio_tc_map[8]; phy_interface_t phy_interface; struct device_node *dn; unsigned int tx_csum_limit; struct phylink *phylink; struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; struct phy *comphy; struct mvneta_bm *bm_priv; @@ -2212,7 +2240,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(pp->dev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(pp->dev, prog, act); @@ -3819,58 +3847,31 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) return 0; } -static void mvneta_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) +static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + return container_of(pcs, struct mvneta_port, phylink_pcs); +} +static int mvneta_pcs_validate(struct phylink_pcs *pcs, + unsigned long *supported, + const struct phylink_link_state *state) +{ /* We only support QSGMII, SGMII, 802.3z and RGMII modes. * When in 802.3z mode, we must have AN enabled: * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... 
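The rate-limit arithmetic introduced above can be checked numerically; the following tiny program reproduces it with the same constants (the output comments are this check's, not the manual's):

#include <stdio.h>

#define NSEC_PER_SEC   1000000000ULL
#define BASE_PERIOD_NS 100	/* MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS */
#define REFILL_PERIOD  1000	/* MVNETA_TXQ_BUCKET_REFILL_PERIOD */

int main(void)
{
	/* One refill every 100 ns * 1000 = 100 us, so the smallest
	 * programmable rate is 10^9 / 10^5 = 10000 bps, i.e. 10 Kbps. */
	unsigned long long res = NSEC_PER_SEC / (BASE_PERIOD_NS * REFILL_PERIOD);

	/* A 100 Mbit/s cap therefore programs 10^8 / 10^4 = 10000 tokens,
	 * comfortably below MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX (0x7ffff). */
	unsigned long long max_rate = 100000000ULL;

	printf("resolution = %llu bps, refill value = %llu\n",
	       res, max_rate / res);
	return 0;
}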
* When <PortType> = 1 (1000BASE-X) this field must be set to 1." */ if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) { - linkmode_zero(supported); - return; - } - - /* Allow all the expected bits */ - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - - /* Asymmetric pause is unsupported */ - phylink_set(mask, Pause); - - /* Half-duplex at speeds higher than 100Mbit is unsupported */ - if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } - - if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } - - if (!phy_interface_mode_is_8023z(state->interface)) { - /* 10M and 100M are only supported in non-802.3z mode */ - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - } + !phylink_test(state->advertising, Autoneg)) + return -EINVAL; - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); + return 0; } -static void mvneta_mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state) +static void mvneta_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - struct net_device *ndev = to_net_dev(config->dev); - struct mvneta_port *pp = netdev_priv(ndev); + struct mvneta_port *pp = mvneta_pcs_to_port(pcs); u32 gmac_stat; gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); @@ -3888,17 +3889,71 @@ static void mvneta_mac_pcs_get_state(struct phylink_config *config, state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); - state->pause = 0; if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) state->pause |= MLO_PAUSE_RX; if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) state->pause |= MLO_PAUSE_TX; } -static void mvneta_mac_an_restart(struct phylink_config *config) +static int mvneta_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) { - struct net_device *ndev = to_net_dev(config->dev); - struct mvneta_port *pp = netdev_priv(ndev); + struct mvneta_port *pp = mvneta_pcs_to_port(pcs); + u32 mask, val, an, old_an, changed; + + mask = MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_INBAND_RESTART_AN | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_FLOW_CTRL_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + + if (phylink_autoneg_inband(mode)) { + mask |= MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX; + val = MVNETA_GMAC_INBAND_AN_ENABLE; + + if (interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII mode receives the speed and duplex from PHY */ + val |= MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + } else { + /* 802.3z mode has fixed speed and duplex */ + val |= MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + /* The FLOW_CTRL_EN bit selects either the hardware + * automatically or the CONFIG_FLOW_CTRL manually + * controls the GMAC pause mode. 
+ */ + if (permit_pause_to_mac) + val |= MVNETA_GMAC_AN_FLOW_CTRL_EN; + + /* Update the advertisement bits */ + mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; + if (phylink_test(advertising, Pause)) + val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; + } + } else { + /* Phy or fixed speed - disable in-band AN modes */ + val = 0; + } + + old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + an = (an & ~mask) | val; + changed = old_an ^ an; + if (changed) + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an); + + /* We are only interested in the advertisement bits changing */ + return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL); +} + +static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct mvneta_port *pp = mvneta_pcs_to_port(pcs); u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, @@ -3907,6 +3962,47 @@ static void mvneta_mac_an_restart(struct phylink_config *config) gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); } +static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { + .pcs_validate = mvneta_pcs_validate, + .pcs_get_state = mvneta_pcs_get_state, + .pcs_config = mvneta_pcs_config, + .pcs_an_restart = mvneta_pcs_an_restart, +}; + +static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 val; + + if (pp->phy_interface != interface || + phylink_autoneg_inband(mode)) { + /* Force the link down when changing the interface or if in + * in-band mode. According to Armada 370 documentation, we + * can only change the port mode and in-band enable when the + * link is down. + */ + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; + val |= MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + if (pp->phy_interface != interface) + WARN_ON(phy_power_off(pp->comphy)); + + /* Enable the 1ms clock */ + if (phylink_autoneg_inband(mode)) { + unsigned long rate = clk_get_rate(pp->clk); + + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, + MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000)); + } + + return 0; +} + static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -3915,20 +4011,11 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); - u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | MVNETA_GMAC2_PORT_RESET); new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); - new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; - new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_INBAND_RESTART_AN | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL | - MVNETA_GMAC_AN_FLOW_CTRL_EN | - MVNETA_GMAC_AN_DUPLEX_EN); /* Even though it might look weird, when we're configured in * SGMII or QSGMII mode, the RGMII bit needs to be set. 
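The mvneta rework above spreads what was one monolithic mac_config() across phylink's prepare/config/finish hooks plus a separate PCS object. Roughly, phylink drives them in the order sketched below; this is a simplified model of the call sequence with stubbed hooks, not phylink source, and the real conditions for the AN restart are more involved.

#include <stdio.h>

static int  mac_prepare(void)    { puts("prepare: force the link down");  return 0; }
static void mac_config(void)     { puts("mac: program the port mode"); }
static int  pcs_config(void)     { puts("pcs: program in-band AN");       return 1; }
static int  mac_finish(void)     { puts("finish: let the link come up");  return 0; }
static void pcs_an_restart(void) { puts("pcs: restart autoneg"); }

int main(void)
{
	int changed;

	if (mac_prepare())
		return 1;
	mac_config();
	changed = pcs_config();		/* > 0 means the advertisement changed */
	if (mac_finish())
		return 1;
	if (changed > 0)
		pcs_an_restart();	/* only when something was reprogrammed */
	return 0;
}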
@@ -3940,9 +4027,6 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, phy_interface_mode_is_8023z(state->interface)) new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; - if (phylink_test(state->advertising, Pause)) - new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; - if (!phylink_autoneg_inband(mode)) { /* Phy or fixed speed - nothing to do, leave the * configured speed, duplex and flow control as-is. @@ -3950,66 +4034,23 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII mode receives the state from the PHY */ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; - new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_CONFIG_MII_SPEED | - MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX)) | - MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN; } else { /* 802.3z negotiation - only 1000base-X */ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; - new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_CONFIG_MII_SPEED)) | - MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_CONFIG_GMII_SPEED | - /* The MAC only supports FD mode */ - MVNETA_GMAC_CONFIG_FULL_DUPLEX; - - if (state->pause & MLO_PAUSE_AN && state->an_enabled) - new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; - } - - /* Armada 370 documentation says we can only change the port mode - * and in-band enable when the link is down, so force it down - * while making these changes. We also do this for GMAC_CTRL2 - */ - if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || - (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || - (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, - (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) | - MVNETA_GMAC_FORCE_LINK_DOWN); } - /* When at 2.5G, the link partner can send frames with shortened * preambles. 
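The 1ms in-band AN clock handling is likewise split: mac_prepare() enables it with a divider derived from the core clock, and mac_finish() tears it down when leaving in-band mode. A toy model of that split; the 250 MHz core clock and the enable-bit position are assumptions for illustration, not the real register layout.

#include <stdbool.h>
#include <stdio.h>

#define CLOCK_ENABLE (1u << 31)	/* illustrative enable bit */

static unsigned int clk_reg;

static void prepare(bool inband, unsigned long rate_hz)
{
	if (inband)
		clk_reg = CLOCK_ENABLE | (unsigned int)(rate_hz / 1000);
}

static void finish(bool inband)
{
	if (!inband)
		clk_reg &= ~CLOCK_ENABLE;	/* no AN timer needed */
}

int main(void)
{
	prepare(true, 250000000UL);		/* assumed 250 MHz core clock */
	printf("clk_reg = 0x%x\n", clk_reg);	/* enable bit | 250000 ticks/ms */
	finish(true);
	return 0;
}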
*/ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; - if (pp->phy_interface != state->interface) { - if (pp->comphy) - WARN_ON(phy_power_off(pp->comphy)); - WARN_ON(mvneta_config_interface(pp, state->interface)); - } - if (new_ctrl0 != gmac_ctrl0) mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); if (new_ctrl2 != gmac_ctrl2) mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); if (new_ctrl4 != gmac_ctrl4) mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); - if (new_clk != gmac_clk) - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); - if (new_an != gmac_an) - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & @@ -4018,6 +4059,36 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, } } +static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 val, clk; + + /* Disable 1ms clock if not in in-band mode */ + if (!phylink_autoneg_inband(mode)) { + clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk); + } + + if (pp->phy_interface != interface) + /* Enable the Serdes PHY */ + WARN_ON(mvneta_config_interface(pp, interface)); + + /* Allow the link to come up if in in-band mode, otherwise the + * link is forced via mac_link_down()/mac_link_up() + */ + if (phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + return 0; +} + static void mvneta_set_eee(struct mvneta_port *pp, bool enable) { u32 lpi_ctl1; @@ -4104,10 +4175,10 @@ static void mvneta_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops mvneta_phylink_ops = { - .validate = mvneta_validate, - .mac_pcs_get_state = mvneta_mac_pcs_get_state, - .mac_an_restart = mvneta_mac_an_restart, + .validate = phylink_generic_validate, + .mac_prepare = mvneta_mac_prepare, .mac_config = mvneta_mac_config, + .mac_finish = mvneta_mac_finish, .mac_link_down = mvneta_mac_link_down, .mac_link_up = mvneta_mac_link_up, }; @@ -4539,8 +4610,11 @@ static void mvneta_ethtool_get_drvinfo(struct net_device *dev, } -static void mvneta_ethtool_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static void +mvneta_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(netdev); @@ -4550,8 +4624,11 @@ static void mvneta_ethtool_get_ringparam(struct net_device *netdev, ring->tx_pending = pp->tx_ring_size; } -static int mvneta_ethtool_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) +static int +mvneta_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); @@ -4919,43 +4996,144 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); } -static void mvneta_setup_rx_prio_map(struct mvneta_port *pp) +static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) { - u32 val = 0; - int i; + u32 val = 
mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ); - for (i = 0; i < rxq_number; i++) - val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]); + val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7); + val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq); mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); } +static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) +{ + unsigned long core_clk_rate; + u32 refill_cycles; + u32 val; + + core_clk_rate = clk_get_rate(pp->clk); + if (!core_clk_rate) + return -EINVAL; + + refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS / + (NSEC_PER_SEC / core_clk_rate); + + if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK) + return -EINVAL; + + /* Enable bw limit algorithm version 3 */ + val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); + val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); + mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); + + /* Set the base refill rate */ + mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles); + + return 0; +} + +static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) +{ + u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); + + val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); + mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); +} + +static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, + u64 min_rate, u64 max_rate) +{ + u32 refill_val, rem; + u32 val = 0; + + /* Convert from Bps to bps */ + max_rate *= 8; + + if (min_rate) + return -EINVAL; + + refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION, + &rem); + + if (rem || !refill_val || + refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX) + return -EINVAL; + + val = refill_val; + val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD << + MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT); + + mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val); + + return 0; +} + static int mvneta_setup_mqprio(struct net_device *dev, - struct tc_mqprio_qopt *qopt) + struct tc_mqprio_qopt_offload *mqprio) { struct mvneta_port *pp = netdev_priv(dev); + int rxq, txq, tc, ret; u8 num_tc; - int i; - qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = qopt->num_tc; + if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) + return 0; + + num_tc = mqprio->qopt.num_tc; if (num_tc > rxq_number) return -EINVAL; + mvneta_clear_rx_prio_map(pp); + if (!num_tc) { - mvneta_clear_rx_prio_map(pp); + mvneta_disable_per_queue_rate_limit(pp); netdev_reset_tc(dev); return 0; } - memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map)); + netdev_set_num_tc(dev, mqprio->qopt.num_tc); + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { + netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc], + mqprio->qopt.offset[tc]); + + for (rxq = mqprio->qopt.offset[tc]; + rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; + rxq++) { + if (rxq >= rxq_number) + return -EINVAL; - mvneta_setup_rx_prio_map(pp); + mvneta_map_vlan_prio_to_rxq(pp, tc, rxq); + } + } - netdev_set_num_tc(dev, qopt->num_tc); - for (i = 0; i < qopt->num_tc; i++) - netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]); + if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { + mvneta_disable_per_queue_rate_limit(pp); + return 0; + } + + if (mqprio->qopt.num_tc > txq_number) + return -EINVAL; + + ret = mvneta_enable_per_queue_rate_limit(pp); + if (ret) + return ret; + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { + for (txq = mqprio->qopt.offset[tc]; + txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; + txq++) { + if (txq >= txq_number) + return -EINVAL; + + ret = mvneta_setup_queue_rates(pp, txq, + mqprio->min_rate[tc], 
+ mqprio->max_rate[tc]); + if (ret) + return ret; + } + } return 0; } @@ -5166,6 +5344,9 @@ static int mvneta_probe(struct platform_device *pdev) pp->phylink_config.dev = &dev->dev; pp->phylink_config.type = PHYLINK_NETDEV; + pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | + MAC_100 | MAC_1000FD | MAC_2500FD; + phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.supported_interfaces); @@ -5237,6 +5418,9 @@ static int mvneta_probe(struct platform_device *pdev) goto err_clk; } + pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; + phylink_set_pcs(phylink, &pp->phylink_pcs); + /* Alloc per-cpu port structure */ pp->ports = alloc_percpu(struct mvneta_pcpu_port); if (!pp->ports) { @@ -5355,7 +5539,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; + netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS); /* MTU range: 68 - 9676 */ dev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index cf8acabb90ac..ad73a488fc5f 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1239,7 +1239,8 @@ struct mvpp2_port { phy_interface_t phy_interface; struct phylink *phylink; struct phylink_config phylink_config; - struct phylink_pcs phylink_pcs; + struct phylink_pcs pcs_gmac; + struct phylink_pcs pcs_xlg; struct phy *comphy; struct mvpp2_bm_pool *pool_long; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2b18d89d9756..7cdbf8b8bbf6 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1488,6 +1488,7 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) static bool mvpp2_is_xlg(phy_interface_t interface) { return interface == PHY_INTERFACE_MODE_10GBASER || + interface == PHY_INTERFACE_MODE_5GBASER || interface == PHY_INTERFACE_MODE_XAUI; } @@ -1627,6 +1628,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface) case PHY_INTERFACE_MODE_2500BASEX: mvpp22_gop_init_sgmii(port); break; + case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: if (!mvpp2_port_supports_xlg(port)) goto invalid_conf; @@ -2186,6 +2188,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); switch (interface) { + case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: val = readl(mpcs + MVPP22_MPCS_CLK_RESET); val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | @@ -2960,11 +2963,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); if (priv->percpu_pools) { - err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_free_dma; - err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); if (err < 0) goto err_unregister_rxq_short; @@ -3820,7 +3823,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog, } break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(port->dev, prog, act); fallthrough; case XDP_ABORTED: 
trace_xdp_exception(port->dev, prog, act); @@ -5017,11 +5020,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); } + if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { + netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", + mtu, (int)MVPP2_MAX_RX_BUF_SIZE); + return -EINVAL; + } + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { - if (port->xdp_prog) { - netdev_err(dev, "Jumbo frames are not supported with XDP\n"); - return -EINVAL; - } if (priv->percpu_pools) { netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); mvpp2_bm_switch_buffers(priv, false); @@ -5137,9 +5142,6 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) - return -EINVAL; - if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -5307,8 +5309,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) bool running = netif_running(port->dev); bool reset = !prog != !port->xdp_prog; - if (port->dev->mtu > ETH_DATA_LEN) { - NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled"); + if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); return -EOPNOTSUPP; } @@ -5431,8 +5433,11 @@ static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } -static void mvpp2_ethtool_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) +static void +mvpp2_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); @@ -5442,8 +5447,11 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev, ring->tx_pending = port->tx_ring_size; } -static int mvpp2_ethtool_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) +static int +mvpp2_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); u16 prev_rx_ring_size = port->rx_ring_size; @@ -6107,18 +6115,26 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) return container_of(config, struct mvpp2_port, phylink_config); } -static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs) +static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mvpp2_port, pcs_xlg); +} + +static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) { - return container_of(pcs, struct mvpp2_port, phylink_pcs); + return container_of(pcs, struct mvpp2_port, pcs_gmac); } static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); u32 val; - state->speed = SPEED_10000; + if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) + state->speed = SPEED_5000; + else + state->speed = SPEED_10000; state->duplex = 1; state->an_complete = 1; @@ -6147,10 +6163,25 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { .pcs_config = mvpp2_xlg_pcs_config, }; +static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, + unsigned long *supported, + const struct 
phylink_link_state *state) +{ + /* When in 802.3z mode, we must have AN enabled: + * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1. + */ + if (phy_interface_mode_is_8023z(state->interface) && + !phylink_test(state->advertising, Autoneg)) + return -EINVAL; + + return 0; +} + static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 val; val = readl(port->base + MVPP2_GMAC_STATUS0); @@ -6187,7 +6218,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, const unsigned long *advertising, bool permit_pause_to_mac) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 mask, val, an, old_an, changed; mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | @@ -6241,7 +6272,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, @@ -6251,78 +6282,12 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { + .pcs_validate = mvpp2_gmac_pcs_validate, .pcs_get_state = mvpp2_gmac_pcs_get_state, .pcs_config = mvpp2_gmac_pcs_config, .pcs_an_restart = mvpp2_gmac_pcs_an_restart, }; -static void mvpp2_phylink_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct mvpp2_port *port = mvpp2_phylink_to_port(config); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - /* When in 802.3z mode, we must have AN enabled: - * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... - * When <PortType> = 1 (1000BASE-X) this field must be set to 1. 
- */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - goto empty_set; - - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - - if (port->priv->global_tx_fc) { - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - } - - switch (state->interface) { - case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_XAUI: - if (mvpp2_port_supports_xlg(port)) { - phylink_set_10g_modes(mask); - phylink_set(mask, 10000baseKR_Full); - } - break; - - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_SGMII: - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - break; - - case PHY_INTERFACE_MODE_1000BASEX: - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - break; - - case PHY_INTERFACE_MODE_2500BASEX: - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - break; - - default: - goto empty_set; - } - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); - return; - -empty_set: - linkmode_zero(supported); -} - static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { @@ -6402,8 +6367,23 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); } -static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) +static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + /* Select the appropriate PCS operations depending on the + * configured interface mode. We will only switch to a mode + * that the validate() checks have already passed. + */ + if (mvpp2_is_xlg(interface)) + return &port->pcs_xlg; + else + return &port->pcs_gmac; +} + +static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); @@ -6452,31 +6432,9 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, } } - /* Select the appropriate PCS operations depending on the - * configured interface mode. We will only switch to a mode - * that the validate() checks have already passed. 
- */ - if (mvpp2_is_xlg(interface)) - port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops; - else - port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops; - return 0; } -static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) -{ - struct mvpp2_port *port = mvpp2_phylink_to_port(config); - int ret; - - ret = mvpp2__mac_prepare(config, mode, interface); - if (ret == 0) - phylink_set_pcs(port->phylink, &port->phylink_pcs); - - return ret; -} - static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -6647,7 +6605,8 @@ static void mvpp2_mac_link_down(struct phylink_config *config, } static const struct phylink_mac_ops mvpp2_phylink_ops = { - .validate = mvpp2_phylink_validate, + .validate = phylink_generic_validate, + .mac_select_pcs = mvpp2_select_pcs, .mac_prepare = mvpp2_mac_prepare, .mac_config = mvpp2_mac_config, .mac_finish = mvpp2_mac_finish, @@ -6665,12 +6624,15 @@ static void mvpp2_acpi_start(struct mvpp2_port *port) struct phylink_link_state state = { .interface = port->phy_interface, }; - mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND, - port->phy_interface); + struct phylink_pcs *pcs; + + pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface); + + mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND, + port->phy_interface); mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); - port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND, - port->phy_interface, - state.advertising, false); + pcs->ops->pcs_config(pcs, MLO_AN_INBAND, port->phy_interface, + state.advertising, false); mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, port->phy_interface); mvpp2_mac_link_up(&port->phylink_config, NULL, @@ -6899,7 +6861,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, mvpp2_set_hw_csum(port, port->pool_long->id); dev->vlan_features |= features; - dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; + netif_set_gso_max_segs(dev, MVPP2_MAX_TSO_SEGS); dev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 68 - 9704 */ @@ -6911,12 +6873,44 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; port->phylink_config.type = PHYLINK_NETDEV; + port->phylink_config.mac_capabilities = + MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; + + if (port->priv->global_tx_fc) + port->phylink_config.mac_capabilities |= + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; if (mvpp2_port_supports_xlg(port)) { - __set_bit(PHY_INTERFACE_MODE_10GBASER, - port->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_XAUI, - port->phylink_config.supported_interfaces); + /* If a COMPHY is present, we can support any of + * the serdes modes and switch between them. 
+ */ + if (comphy) { + __set_bit(PHY_INTERFACE_MODE_5GBASER, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_10GBASER, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_XAUI, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) { + __set_bit(PHY_INTERFACE_MODE_5GBASER, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) { + __set_bit(PHY_INTERFACE_MODE_10GBASER, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) { + __set_bit(PHY_INTERFACE_MODE_XAUI, + port->phylink_config.supported_interfaces); + } + + if (comphy) + port->phylink_config.mac_capabilities |= + MAC_10000FD | MAC_5000FD; + else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) + port->phylink_config.mac_capabilities |= + MAC_5000FD; + else + port->phylink_config.mac_capabilities |= + MAC_10000FD; } if (mvpp2_port_supports_rgmii(port)) @@ -6946,6 +6940,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->phylink_config.supported_interfaces); } + port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; + port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; + phylink = phylink_create(&port->phylink_config, port_fwnode, phy_mode, &mvpp2_phylink_ops); if (IS_ERR(phylink)) { @@ -7456,7 +7453,7 @@ static int mvpp2_probe(struct platform_device *pdev) shared = num_present_cpus() - priv->nthreads; if (shared > 0) - bitmap_fill(&priv->lock_map, + bitmap_set(&priv->lock_map, 0, min_t(int, shared, MVPP2_MAX_THREADS)); for (i = 0; i < MVPP2_MAX_THREADS; i++) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index d6321de3cc17..e682b7bfde64 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -60,6 +60,8 @@ struct ptp *ptp_get(void) /* Check driver is bound to PTP block */ if (!ptp) ptp = ERR_PTR(-EPROBE_DEFER); + else + pci_dev_get(ptp->pdev); return ptp; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index cb56e171ddd4..3ca6b942ebe2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto free_regions; break; default: - return err; + goto free_regions; } mw->mbox_wq = alloc_workqueue(name, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 45357deecabb..a73a8017e0ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -172,14 +172,13 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; - char irq_name[16]; int i, ret; for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { - snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i); + sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i); ret = rvu_cpt_do_register_interrupt(block, off + i, rvu_cpt_af_flt_intr_handler, - irq_name); + &rvu->irq_name[(off + i) * NAME_SIZE]); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index c7fd466a0efd..a09a507369ac 100644 
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -236,10 +236,11 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; - int index = 0, off = 0; - int bytes_not_copied; int buf_size = 10240; + size_t off = 0; + int index = 0; char *buf; + int ret; /* don't allow partial reads */ if (*ppos != 0) @@ -303,15 +304,17 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, } off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); - bytes_not_copied = copy_to_user(buffer, buf, off); + ret = min(off, count); + if (copy_to_user(buffer, buf, ret)) + ret = -EFAULT; kfree(buf); iounmap(lmt_map_base); - if (bytes_not_copied) - return -EFAULT; + if (ret < 0) + return ret; - *ppos = off; - return off; + *ppos = ret; + return ret; } RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 70bacd38a6d9..d0ab8f233a02 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -41,7 +41,7 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset, struct rvu_devlink *rvu_dl = rvu->rvu_dl; int rc; - sprintf(&rvu->irq_name[offset * NAME_SIZE], name); + sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name); rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0, &rvu->irq_name[offset * NAME_SIZE], rvu_dl); if (rc) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 80d4ce61f442..d85db90632d6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -360,7 +360,9 @@ static int otx2_set_pauseparam(struct net_device *netdev, } static void otx2_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_qset *qs = &pfvf->qset; @@ -372,7 +374,9 @@ static void otx2_get_ringparam(struct net_device *netdev, } static int otx2_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); bool if_up = netif_running(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 1e0d0c9c1dac..6080ebd9bd94 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2002,10 +2002,6 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: otx2_config_hw_tx_tstamp(pfvf, false); @@ -2741,7 +2737,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; - netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS); 
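The otx2 hunk above, like the earlier mvneta and mvpp2 ones, replaces direct writes of dev->gso_max_segs with the new netif_set_gso_max_segs() helper. A minimal sketch of the pattern at probe time, assuming a hypothetical driver limit MYDRV_MAX_TSO_SEGS rather than any constant from this series:

	#include <linux/netdevice.h>

	/* Hypothetical cap, for illustration only -- not from this patch. */
	#define MYDRV_MAX_TSO_SEGS	100

	static void mydrv_cap_tso(struct net_device *netdev)
	{
		/* Preferred over "netdev->gso_max_segs = ...": the helper keeps
		 * the store in one place so the core can annotate it for
		 * lockless readers on the GSO path.
		 */
		netif_set_gso_max_segs(netdev, MYDRV_MAX_TSO_SEGS);
	}

Routing every driver through the setter lets the core later change how the limit is stored without another tree-wide sweep.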
netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2_netdev_ops; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 0ef68fdd1f26..61c20907315f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -5,6 +5,8 @@ * */ +#include <linux/module.h> + #include "otx2_common.h" #include "otx2_ptp.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 0cc6353254bf..7c4068c5d1ac 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1198,7 +1198,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, put_page(page); break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); break; case XDP_ABORTED: trace_xdp_exception(pfvf->netdev, prog, act); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 78944ad3492f..925b74ebb8b0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -663,7 +663,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_NTUPLE; netdev->hw_features |= NETIF_F_RXALL; - netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2vf_netdev_ops; @@ -684,7 +684,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_detach_rsrc; + goto err_ptp_destroy; } err = otx2_wq_init(vf); @@ -709,6 +709,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_unreg_netdev: unregister_netdev(netdev); +err_ptp_destroy: + otx2_ptp_destroy(vf); err_detach_rsrc: if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) qmem_free(vf->dev, vf->dync_lmt); @@ -742,6 +744,7 @@ static void otx2vf_remove(struct pci_dev *pdev) unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); + otx2_ptp_destroy(vf); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile index 0609df8b913d..d395f4131648 100644 --- a/drivers/net/ethernet/marvell/prestera/Makefile +++ b/drivers/net/ethernet/marvell/prestera/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PRESTERA) += prestera.o prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \ prestera_switchdev.o prestera_acl.o prestera_flow.o \ - prestera_flower.o prestera_span.o + prestera_flower.o prestera_span.o prestera_counter.o \ + prestera_router.o prestera_router_hw.o obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h index 2a4c14c704c0..a0a5a8e6bd8c 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera.h +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -225,6 +225,29 @@ struct prestera_event { }; }; +enum prestera_if_type { + /* the interface is of port type (dev,port) */ + PRESTERA_IF_PORT_E = 0, 
+ + /* the interface is of lag type (lag-id) */ + PRESTERA_IF_LAG_E = 1, + + /* the interface is of Vid type (vlan-id) */ + PRESTERA_IF_VID_E = 3, +}; + +struct prestera_iface { + enum prestera_if_type type; + struct { + u32 hw_dev_num; + u32 port_num; + } dev_port; + u32 hw_dev_num; + u16 vr_id; + u16 lag_id; + u16 vlan_id; +}; + struct prestera_switchdev; struct prestera_span; struct prestera_rxtx; @@ -247,11 +270,22 @@ struct prestera_switch { u32 mtu_min; u32 mtu_max; u8 id; + struct prestera_router *router; struct prestera_lag *lags; + struct prestera_counter *counter; u8 lag_member_max; u8 lag_max; }; +struct prestera_router { + struct prestera_switch *sw; + struct list_head vr_list; + struct list_head rif_entry_list; + struct notifier_block inetaddr_nb; + struct notifier_block inetaddr_valid_nb; + bool aborted; +}; + struct prestera_rxtx_params { bool use_sdma; u32 map_addr; @@ -279,6 +313,9 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes); +int prestera_router_init(struct prestera_switch *sw); +void prestera_router_fini(struct prestera_switch *sw); + struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id); int prestera_port_cfg_mac_read(struct prestera_port *port, @@ -293,6 +330,8 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid); bool prestera_netdev_check(const struct net_device *dev); +int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr); + bool prestera_port_is_lag_member(const struct prestera_port *port); struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index 83c75ffb1a1c..f0d9f592173b 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -1,35 +1,72 @@ // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 -/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ +/* Copyright (c) 2020-2021 Marvell International Ltd. 
All rights reserved */ #include <linux/rhashtable.h> -#include "prestera.h" -#include "prestera_hw.h" #include "prestera_acl.h" -#include "prestera_span.h" +#include "prestera_flow.h" +#include "prestera_hw.h" +#include "prestera.h" + +#define ACL_KEYMASK_SIZE \ + (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) struct prestera_acl { struct prestera_switch *sw; + struct list_head vtcam_list; struct list_head rules; + struct rhashtable ruleset_ht; + struct rhashtable acl_rule_entry_ht; + struct idr uid; +}; + +struct prestera_acl_ruleset_ht_key { + struct prestera_flow_block *block; +}; + +struct prestera_acl_rule_entry { + struct rhash_head ht_node; + struct prestera_acl_rule_entry_key key; + u32 hw_id; + u32 vtcam_id; + struct { + struct { + u8 valid:1; + } accept, drop, trap; + struct { + u32 id; + struct prestera_counter_block *block; + } counter; + }; }; struct prestera_acl_ruleset { + struct rhash_head ht_node; /* Member of acl HT */ + struct prestera_acl_ruleset_ht_key ht_key; struct rhashtable rule_ht; - struct prestera_switch *sw; - u16 id; + struct prestera_acl *acl; + unsigned long rule_count; + refcount_t refcount; + void *keymask; + u32 vtcam_id; + u16 pcl_id; + bool offload; }; -struct prestera_acl_rule { - struct rhash_head ht_node; +struct prestera_acl_vtcam { struct list_head list; - struct list_head match_list; - struct list_head action_list; - struct prestera_flow_block *block; - unsigned long cookie; - u32 priority; - u8 n_actions; - u8 n_matches; + __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + refcount_t refcount; u32 id; + bool is_keymask_set; + u8 lookup; +}; + +static const struct rhashtable_params prestera_acl_ruleset_ht_params = { + .key_len = sizeof(struct prestera_acl_ruleset_ht_key), + .key_offset = offsetof(struct prestera_acl_ruleset, ht_key), + .head_offset = offsetof(struct prestera_acl_ruleset, ht_node), + .automatic_shrinking = true, }; static const struct rhashtable_params prestera_acl_rule_ht_params = { @@ -39,28 +76,48 @@ static const struct rhashtable_params prestera_acl_rule_ht_params = { .automatic_shrinking = true, }; +static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = { + .key_offset = offsetof(struct prestera_acl_rule_entry, key), + .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node), + .key_len = sizeof(struct prestera_acl_rule_entry_key), + .automatic_shrinking = true, +}; + static struct prestera_acl_ruleset * -prestera_acl_ruleset_create(struct prestera_switch *sw) +prestera_acl_ruleset_create(struct prestera_acl *acl, + struct prestera_flow_block *block) { struct prestera_acl_ruleset *ruleset; + u32 uid = 0; int err; ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL); if (!ruleset) return ERR_PTR(-ENOMEM); + ruleset->acl = acl; + ruleset->ht_key.block = block; + refcount_set(&ruleset->refcount, 1); + err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params); if (err) goto err_rhashtable_init; - err = prestera_hw_acl_ruleset_create(sw, &ruleset->id); + err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL); if (err) goto err_ruleset_create; - ruleset->sw = sw; + /* make pcl-id based on uid */ + ruleset->pcl_id = (u8)uid; + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, + prestera_acl_ruleset_ht_params); + if (err) + goto err_ruleset_ht_insert; return ruleset; +err_ruleset_ht_insert: + idr_remove(&acl->uid, uid); err_ruleset_create: rhashtable_destroy(&ruleset->rule_ht); err_rhashtable_init: @@ -68,117 +125,164 @@ err_rhashtable_init: return ERR_PTR(err); } 
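prestera_acl_ruleset_create() above fixes the lifetime rules used throughout this rework: the creator holds the initial reference, every lookup takes an extra one, and the object is torn down by the last put. A condensed sketch of that idiom, using a hypothetical struct my_obj purely for illustration:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct my_obj {
		refcount_t refcount;
		/* ... payload ... */
	};

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (!o)
			return NULL;
		refcount_set(&o->refcount, 1);	/* creator's reference */
		return o;
	}

	static struct my_obj *my_obj_get(struct my_obj *o)
	{
		refcount_inc(&o->refcount);	/* each user takes its own */
		return o;
	}

	static void my_obj_put(struct my_obj *o)
	{
		if (!refcount_dec_and_test(&o->refcount))
			return;
		kfree(o);			/* last put frees */
	}

prestera_acl_ruleset_put() and prestera_acl_ruleset_destroy() below follow exactly this shape, with the hash-table and idr teardown folded into the destroy step.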
-static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) +void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask) { - prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id); - rhashtable_destroy(&ruleset->rule_ht); - kfree(ruleset); + ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL); } -struct prestera_flow_block * -prestera_acl_block_create(struct prestera_switch *sw, struct net *net) +int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset) { - struct prestera_flow_block *block; + u32 vtcam_id; + int err; - block = kzalloc(sizeof(*block), GFP_KERNEL); - if (!block) - return NULL; - INIT_LIST_HEAD(&block->binding_list); - block->net = net; - block->sw = sw; - - block->ruleset = prestera_acl_ruleset_create(sw); - if (IS_ERR(block->ruleset)) { - kfree(block); - return NULL; - } + if (ruleset->offload) + return -EEXIST; + + err = prestera_acl_vtcam_id_get(ruleset->acl, 0, + ruleset->keymask, &vtcam_id); + if (err) + return err; - return block; + ruleset->vtcam_id = vtcam_id; + ruleset->offload = true; + return 0; } -void prestera_acl_block_destroy(struct prestera_flow_block *block) +static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) { - prestera_acl_ruleset_destroy(block->ruleset); - WARN_ON(!list_empty(&block->binding_list)); - kfree(block); + struct prestera_acl *acl = ruleset->acl; + u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER; + + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + prestera_acl_ruleset_ht_params); + + if (ruleset->offload) + WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id)); + + idr_remove(&acl->uid, uid); + + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset->keymask); + kfree(ruleset); } -static struct prestera_flow_block_binding * -prestera_acl_block_lookup(struct prestera_flow_block *block, - struct prestera_port *port) +static struct prestera_acl_ruleset * +__prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block) { - struct prestera_flow_block_binding *binding; + struct prestera_acl_ruleset_ht_key ht_key; - list_for_each_entry(binding, &block->binding_list, list) - if (binding->port == port) - return binding; - - return NULL; + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.block = block; + return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + prestera_acl_ruleset_ht_params); } -int prestera_acl_block_bind(struct prestera_flow_block *block, - struct prestera_port *port) +struct prestera_acl_ruleset * +prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block) { - struct prestera_flow_block_binding *binding; - int err; + struct prestera_acl_ruleset *ruleset; - if (WARN_ON(prestera_acl_block_lookup(block, port))) - return -EEXIST; + ruleset = __prestera_acl_ruleset_lookup(acl, block); + if (!ruleset) + return ERR_PTR(-ENOENT); - binding = kzalloc(sizeof(*binding), GFP_KERNEL); - if (!binding) - return -ENOMEM; - binding->span_id = PRESTERA_SPAN_INVALID_ID; - binding->port = port; + refcount_inc(&ruleset->refcount); + return ruleset; +} - err = prestera_hw_acl_port_bind(port, block->ruleset->id); - if (err) - goto err_rules_bind; +struct prestera_acl_ruleset * +prestera_acl_ruleset_get(struct prestera_acl *acl, + struct prestera_flow_block *block) +{ + struct prestera_acl_ruleset *ruleset; - list_add(&binding->list, &block->binding_list); - return 0; + ruleset = __prestera_acl_ruleset_lookup(acl, block); + if (ruleset) { + 
refcount_inc(&ruleset->refcount); + return ruleset; + } -err_rules_bind: - kfree(binding); - return err; + return prestera_acl_ruleset_create(acl, block); } -int prestera_acl_block_unbind(struct prestera_flow_block *block, - struct prestera_port *port) +void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset) { - struct prestera_flow_block_binding *binding; + if (!refcount_dec_and_test(&ruleset->refcount)) + return; - binding = prestera_acl_block_lookup(block, port); - if (!binding) - return -ENOENT; - - list_del(&binding->list); + prestera_acl_ruleset_destroy(ruleset); +} - prestera_hw_acl_port_unbind(port, block->ruleset->id); +int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port) +{ + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_PORT, + .port = port + }; - kfree(binding); - return 0; + return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id, + ruleset->pcl_id); } -struct prestera_acl_ruleset * -prestera_acl_block_ruleset_get(struct prestera_flow_block *block) +int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port) { - return block->ruleset; + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_PORT, + .port = port + }; + + return prestera_hw_vtcam_iface_unbind(port->sw, &iface, + ruleset->vtcam_id); } -u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule) +static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_flow_block *block) { - return rule->block->ruleset->id; + struct prestera_flow_block_binding *binding; + int err; + + block->ruleset_zero = ruleset; + list_for_each_entry(binding, &block->binding_list, list) { + err = prestera_acl_ruleset_bind(ruleset, binding->port); + if (err) + goto rollback; + } + return 0; + +rollback: + list_for_each_entry_continue_reverse(binding, &block->binding_list, + list) + err = prestera_acl_ruleset_unbind(ruleset, binding->port); + block->ruleset_zero = NULL; + + return err; } -struct net *prestera_acl_block_net(struct prestera_flow_block *block) +static void +prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_flow_block *block) { - return block->net; + struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + prestera_acl_ruleset_unbind(ruleset, binding->port); + block->ruleset_zero = NULL; } -struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block) +void +prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id) { - return block->sw; + struct prestera_acl_match *r_match = &rule->re_key.match; + __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID); + __be16 pcl_id_key = htons(pcl_id); + + rule_match_set(r_match->key, PCL_ID, pcl_id_key); + rule_match_set(r_match->mask, PCL_ID, pcl_id_mask); } struct prestera_acl_rule * @@ -189,8 +293,13 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, prestera_acl_rule_ht_params); } +bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset) +{ + return ruleset->offload; +} + struct prestera_acl_rule * -prestera_acl_rule_create(struct prestera_flow_block *block, +prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, unsigned long cookie) { struct prestera_acl_rule *rule; @@ -199,178 +308,436 @@ prestera_acl_rule_create(struct prestera_flow_block *block, if (!rule) return ERR_PTR(-ENOMEM); - 
INIT_LIST_HEAD(&rule->match_list); - INIT_LIST_HEAD(&rule->action_list); + rule->ruleset = ruleset; rule->cookie = cookie; - rule->block = block; + + refcount_inc(&ruleset->refcount); return rule; } -struct list_head * -prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule) +void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, + u32 priority) { - return &rule->match_list; + rule->priority = priority; } -struct list_head * -prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule) +void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) { - return &rule->action_list; + prestera_acl_ruleset_put(rule->ruleset); + kfree(rule); } -int prestera_acl_rule_action_add(struct prestera_acl_rule *rule, - struct prestera_acl_rule_action_entry *entry) +int prestera_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule) { - struct prestera_acl_rule_action_entry *a_entry; + int err; + struct prestera_acl_ruleset *ruleset = rule->ruleset; + struct prestera_flow_block *block = ruleset->ht_key.block; - a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL); - if (!a_entry) - return -ENOMEM; + /* try to add rule to hash table first */ + err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + if (err) + goto err_ht_insert; - memcpy(a_entry, entry, sizeof(*entry)); - list_add(&a_entry->list, &rule->action_list); + prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id); + rule->re_arg.vtcam_id = ruleset->vtcam_id; + rule->re_key.prio = rule->priority; - rule->n_actions++; + /* setup counter */ + rule->re_arg.count.valid = true; + rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0; + + rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key); + err = WARN_ON(rule->re) ? -EEXIST : 0; + if (err) + goto err_rule_add; + + rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key, + &rule->re_arg); + err = !rule->re ? 
-EINVAL : 0; + if (err) + goto err_rule_add; + + /* bind the block (all ports) to chain index 0 */ + if (!ruleset->rule_count) { + err = prestera_acl_ruleset_block_bind(ruleset, block); + if (err) + goto err_acl_block_bind; + } + + list_add_tail(&rule->list, &sw->acl->rules); + ruleset->rule_count++; return 0; + +err_acl_block_bind: + prestera_acl_rule_entry_destroy(sw->acl, rule->re); +err_rule_add: + rule->re = NULL; + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); +err_ht_insert: + return err; } -u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule) +void prestera_acl_rule_del(struct prestera_switch *sw, + struct prestera_acl_rule *rule) { - return rule->n_actions; + struct prestera_acl_ruleset *ruleset = rule->ruleset; + struct prestera_flow_block *block = ruleset->ht_key.block; + + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + ruleset->rule_count--; + list_del(&rule->list); + + prestera_acl_rule_entry_destroy(sw->acl, rule->re); + + /* unbind block (all ports) */ + if (!ruleset->rule_count) + prestera_acl_ruleset_block_unbind(ruleset, block); } -u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule) +int prestera_acl_rule_get_stats(struct prestera_acl *acl, + struct prestera_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use) { - return rule->priority; + u64 current_packets; + u64 current_bytes; + int err; + + err = prestera_counter_stats_get(acl->sw->counter, + rule->re->counter.block, + rule->re->counter.id, + &current_packets, &current_bytes); + if (err) + return err; + + *packets = current_packets; + *bytes = current_bytes; + *last_use = jiffies; + + return 0; } -void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, - u32 priority) +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_find(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key) { - rule->priority = priority; + return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key, + __prestera_acl_rule_entry_ht_params); } -int prestera_acl_rule_match_add(struct prestera_acl_rule *rule, - struct prestera_acl_rule_match_entry *entry) +static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) { - struct prestera_acl_rule_match_entry *m_entry; + return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id); +} - m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL); - if (!m_entry) - return -ENOMEM; +static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) +{ + struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX]; + int act_num; - memcpy(m_entry, entry, sizeof(*entry)); - list_add(&m_entry->list, &rule->match_list); + memset(&act_hw, 0, sizeof(act_hw)); + act_num = 0; - rule->n_matches++; - return 0; + /* accept */ + if (e->accept.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT; + act_num++; + } + /* drop */ + if (e->drop.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP; + act_num++; + } + /* trap */ + if (e->trap.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP; + act_num++; + } + /* counter */ + if (e->counter.block) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT; + act_hw[act_num].count.id = e->counter.id; + act_num++; + } + + return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio, + e->key.match.key, e->key.match.mask, + act_hw, act_num, &e->hw_id); } -u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule) 
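With the split above, prestera_acl_rule_add() only links the software rule to a shared rule entry; the single hardware write happens in __prestera_acl_rule_entry2hw_add(), which packs the enabled actions into an act_hw[] array for one prestera_hw_vtcam_rule_add() call. A hedged sketch of how a caller might drive the reworked API (error paths trimmed, and the flower glue that fills the match key is omitted; example_offload_rule() itself is not part of this patch):

	static int example_offload_rule(struct prestera_switch *sw,
					struct prestera_flow_block *block,
					unsigned long cookie, u32 prio)
	{
		struct prestera_acl_ruleset *ruleset;
		struct prestera_acl_rule *rule;
		int err;

		ruleset = prestera_acl_ruleset_get(sw->acl, block);
		if (IS_ERR(ruleset))
			return PTR_ERR(ruleset);

		/* allocate a vTCAM for the ruleset before the first rule */
		if (!prestera_acl_ruleset_is_offload(ruleset)) {
			err = prestera_acl_ruleset_offload(ruleset);
			if (err)
				goto out;
		}

		rule = prestera_acl_rule_create(ruleset, cookie); /* takes a ref */
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			goto out;
		}

		prestera_acl_rule_priority_set(rule, prio);
		/* fill rule->re_key.match and rule->re_arg here */

		err = prestera_acl_rule_add(sw, rule);
		if (err)
			prestera_acl_rule_destroy(rule);
	out:
		prestera_acl_ruleset_put(ruleset); /* the rule keeps its own ref */
		return err;
	}

Note that prestera_acl_ruleset_offload() reaches the vTCAM allocator through prestera_acl_vtcam_id_get(), which reuses a vTCAM with a matching keymask when possible, creates a new one otherwise, and as a last resort tries to fit into an existing region when the hardware is out of resources.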
+static void +__prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) { - return rule->n_matches; + /* counter */ + prestera_counter_put(sw->counter, e->counter.block, e->counter.id); } -void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) +void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, + struct prestera_acl_rule_entry *e) { - struct prestera_acl_rule_action_entry *a_entry; - struct prestera_acl_rule_match_entry *m_entry; - struct list_head *pos, *n; + int ret; - list_for_each_safe(pos, n, &rule->match_list) { - m_entry = list_entry(pos, typeof(*m_entry), list); - list_del(pos); - kfree(m_entry); - } + rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node, + __prestera_acl_rule_entry_ht_params); + + ret = __prestera_acl_rule_entry2hw_del(acl->sw, e); + WARN_ON(ret && ret != -ENODEV); - list_for_each_safe(pos, n, &rule->action_list) { - a_entry = list_entry(pos, typeof(*a_entry), list); - list_del(pos); - kfree(a_entry); + __prestera_acl_rule_entry_act_destruct(acl->sw, e); + kfree(e); +} + +static int +__prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e, + struct prestera_acl_rule_entry_arg *arg) +{ + /* accept */ + e->accept.valid = arg->accept.valid; + /* drop */ + e->drop.valid = arg->drop.valid; + /* trap */ + e->trap.valid = arg->trap.valid; + /* counter */ + if (arg->count.valid) { + int err; + + err = prestera_counter_get(sw->counter, arg->count.client, + &e->counter.block, + &e->counter.id); + if (err) + goto err_out; } - kfree(rule); + return 0; + +err_out: + __prestera_acl_rule_entry_act_destruct(sw, e); + return -EINVAL; } -int prestera_acl_rule_add(struct prestera_switch *sw, - struct prestera_acl_rule *rule) +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_create(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key, + struct prestera_acl_rule_entry_arg *arg) { - u32 rule_id; + struct prestera_acl_rule_entry *e; int err; - /* try to add rule to hash table first */ - err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht, - &rule->ht_node, - prestera_acl_rule_ht_params); - if (err) - return err; + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + goto err_kzalloc; - /* add rule to hw */ - err = prestera_hw_acl_rule_add(sw, rule, &rule_id); + memcpy(&e->key, key, sizeof(*key)); + e->vtcam_id = arg->vtcam_id; + err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg); if (err) - goto err_rule_add; + goto err_act_construct; - rule->id = rule_id; + err = __prestera_acl_rule_entry2hw_add(acl->sw, e); + if (err) + goto err_hw_add; - list_add_tail(&rule->list, &sw->acl->rules); + err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node, + __prestera_acl_rule_entry_ht_params); + if (err) + goto err_ht_insert; - return 0; + return e; -err_rule_add: - rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node, - prestera_acl_rule_ht_params); - return err; +err_ht_insert: + WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e)); +err_hw_add: + __prestera_acl_rule_entry_act_destruct(acl->sw, e); +err_act_construct: + kfree(e); +err_kzalloc: + return NULL; } -void prestera_acl_rule_del(struct prestera_switch *sw, - struct prestera_acl_rule *rule) +static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup, + void *keymask, u32 *vtcam_id) { - rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node, - prestera_acl_rule_ht_params); - list_del(&rule->list); - 
prestera_hw_acl_rule_del(sw, rule->id); + struct prestera_acl_vtcam *vtcam; + int i; + + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (lookup != vtcam->lookup) + continue; + + if (!keymask && !vtcam->is_keymask_set) + goto vtcam_found; + + if (!(keymask && vtcam->is_keymask_set)) + continue; + + /* try to fit with vtcam keymask */ + for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) { + __be32 __keymask = ((__be32 *)keymask)[i]; + + if (!__keymask) + /* vtcam keymask is not interesting */ + continue; + + if (__keymask & ~vtcam->keymask[i]) + /* keymask does not fit the vtcam keymask */ + break; + } + + if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) + /* keymask fits vtcam keymask, return it */ + goto vtcam_found; + } + + /* nothing is found */ + return -ENOENT; + +vtcam_found: + refcount_inc(&vtcam->refcount); + *vtcam_id = vtcam->id; + return 0; } -int prestera_acl_rule_get_stats(struct prestera_switch *sw, - struct prestera_acl_rule *rule, - u64 *packets, u64 *bytes, u64 *last_use) +int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, + void *keymask, u32 *vtcam_id) { - u64 current_packets; - u64 current_bytes; + struct prestera_acl_vtcam *vtcam; + u32 new_vtcam_id; int err; - err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets, - &current_bytes); - if (err) - return err; + /* find the vtcam that suits keymask. We do not expect to have + * a big number of vtcams, so, the list type for vtcam list is + * fine for now + */ + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (lookup != vtcam->lookup) + continue; + + if (!keymask && !vtcam->is_keymask_set) { + refcount_inc(&vtcam->refcount); + goto vtcam_found; + } + + if (keymask && vtcam->is_keymask_set && + !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) { + refcount_inc(&vtcam->refcount); + goto vtcam_found; + } + } - *packets = current_packets; - *bytes = current_bytes; - *last_use = jiffies; + /* vtcam not found, try to create new one */ + vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL); + if (!vtcam) + return -ENOMEM; + + err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id, + PRESTERA_HW_VTCAM_DIR_INGRESS); + if (err) { + kfree(vtcam); + + /* cannot create new, try to fit into existing vtcam */ + if (__prestera_acl_vtcam_id_try_fit(acl, lookup, + keymask, &new_vtcam_id)) + return err; + *vtcam_id = new_vtcam_id; + return 0; + } + + vtcam->id = new_vtcam_id; + vtcam->lookup = lookup; + if (keymask) { + memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask)); + vtcam->is_keymask_set = true; + } + refcount_set(&vtcam->refcount, 1); + list_add_rcu(&vtcam->list, &acl->vtcam_list); + +vtcam_found: + *vtcam_id = vtcam->id; return 0; } +int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id) +{ + struct prestera_acl_vtcam *vtcam; + int err; + + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (vtcam_id != vtcam->id) + continue; + + if (!refcount_dec_and_test(&vtcam->refcount)) + return 0; + + err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id); + if (err && err != -ENODEV) { + refcount_set(&vtcam->refcount, 1); + return err; + } + + list_del(&vtcam->list); + kfree(vtcam); + return 0; + } + + return -ENOENT; +} + int prestera_acl_init(struct prestera_switch *sw) { struct prestera_acl *acl; + int err; acl = kzalloc(sizeof(*acl), GFP_KERNEL); if (!acl) return -ENOMEM; + acl->sw = sw; INIT_LIST_HEAD(&acl->rules); + INIT_LIST_HEAD(&acl->vtcam_list); + idr_init(&acl->uid); + + err = rhashtable_init(&acl->acl_rule_entry_ht, + 
&__prestera_acl_rule_entry_ht_params); + if (err) + goto err_acl_rule_entry_ht_init; + + err = rhashtable_init(&acl->ruleset_ht, + &prestera_acl_ruleset_ht_params); + if (err) + goto err_ruleset_ht_init; + sw->acl = acl; - acl->sw = sw; return 0; + +err_ruleset_ht_init: + rhashtable_destroy(&acl->acl_rule_entry_ht); +err_acl_rule_entry_ht_init: + kfree(acl); + return err; } void prestera_acl_fini(struct prestera_switch *sw) { struct prestera_acl *acl = sw->acl; + WARN_ON(!idr_is_empty(&acl->uid)); + idr_destroy(&acl->uid); + + WARN_ON(!list_empty(&acl->vtcam_list)); WARN_ON(!list_empty(&acl->rules)); + + rhashtable_destroy(&acl->ruleset_ht); + rhashtable_destroy(&acl->acl_rule_entry_ht); + kfree(acl); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h index 39b7869be659..40f6c1d961fa 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -1,114 +1,130 @@ /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ -/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ +/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */ #ifndef _PRESTERA_ACL_H_ #define _PRESTERA_ACL_H_ -enum prestera_acl_rule_match_entry_type { - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE +#include <linux/types.h> +#include "prestera_counter.h" + +#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF +#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \ + (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF) + +#define rule_match_set_n(match_p, type, val_p, size) \ + memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ + val_p, size) +#define rule_match_set(match_p, type, val) \ + memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ + &(val), sizeof(val)) + +enum prestera_acl_match_type { + PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_TYPE, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_0, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_1, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_0, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_1, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_PROTO, + PRESTERA_ACL_RULE_MATCH_TYPE_SYS_PORT, + PRESTERA_ACL_RULE_MATCH_TYPE_SYS_DEV, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_ID, + PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_TPID, + PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_TYPE, + PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_CODE, + + __PRESTERA_ACL_RULE_MATCH_TYPE_MAX }; enum prestera_acl_rule_action { - PRESTERA_ACL_RULE_ACTION_ACCEPT, - PRESTERA_ACL_RULE_ACTION_DROP, - PRESTERA_ACL_RULE_ACTION_TRAP + PRESTERA_ACL_RULE_ACTION_ACCEPT = 0, + 
PRESTERA_ACL_RULE_ACTION_DROP = 1, + PRESTERA_ACL_RULE_ACTION_TRAP = 2, + PRESTERA_ACL_RULE_ACTION_COUNT = 7, + + PRESTERA_ACL_RULE_ACTION_MAX }; -struct prestera_switch; -struct prestera_port; -struct prestera_acl_rule; -struct prestera_acl_ruleset; +enum { + PRESTERA_ACL_IFACE_TYPE_PORT, + PRESTERA_ACL_IFACE_TYPE_INDEX +}; -struct prestera_flow_block_binding { - struct list_head list; - struct prestera_port *port; - int span_id; +struct prestera_acl_match { + __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; }; -struct prestera_flow_block { - struct list_head binding_list; - struct prestera_switch *sw; - struct net *net; - struct prestera_acl_ruleset *ruleset; - struct flow_block_cb *block_cb; +struct prestera_acl_action_count { + u32 id; }; -struct prestera_acl_rule_action_entry { - struct list_head list; - enum prestera_acl_rule_action id; +struct prestera_acl_rule_entry_key { + u32 prio; + struct prestera_acl_match match; }; -struct prestera_acl_rule_match_entry { - struct list_head list; - enum prestera_acl_rule_match_entry_type type; +struct prestera_acl_hw_action_info { + enum prestera_acl_rule_action id; union { + struct prestera_acl_action_count count; + }; +}; + +/* This struct (arg) used only to be passed as parameter for + * acl_rule_entry_create. Must be flat. Can contain object keys, which will be + * resolved to object links, before saving to acl_rule_entry struct + */ +struct prestera_acl_rule_entry_arg { + u32 vtcam_id; + struct { struct { - u8 key; - u8 mask; - } u8; - struct { - u16 key; - u16 mask; - } u16; - struct { - u32 key; - u32 mask; - } u32; - struct { - u64 key; - u64 mask; - } u64; + u8 valid:1; + } accept, drop, trap; struct { - u8 key[ETH_ALEN]; - u8 mask[ETH_ALEN]; - } mac; - } keymask; + u8 valid:1; + u32 client; + } count; + }; +}; + +struct prestera_acl_rule { + struct rhash_head ht_node; /* Member of acl HT */ + struct list_head list; + struct prestera_acl_ruleset *ruleset; + unsigned long cookie; + u32 priority; + struct prestera_acl_rule_entry_key re_key; + struct prestera_acl_rule_entry_arg re_arg; + struct prestera_acl_rule_entry *re; }; +struct prestera_acl_iface { + union { + struct prestera_port *port; + u32 index; + }; + u8 type; +}; + +struct prestera_acl; +struct prestera_switch; +struct prestera_flow_block; + int prestera_acl_init(struct prestera_switch *sw); void prestera_acl_fini(struct prestera_switch *sw); -struct prestera_flow_block * -prestera_acl_block_create(struct prestera_switch *sw, struct net *net); -void prestera_acl_block_destroy(struct prestera_flow_block *block); -struct net *prestera_acl_block_net(struct prestera_flow_block *block); -struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block); -int prestera_acl_block_bind(struct prestera_flow_block *block, - struct prestera_port *port); -int prestera_acl_block_unbind(struct prestera_flow_block *block, - struct prestera_port *port); -struct prestera_acl_ruleset * -prestera_acl_block_ruleset_get(struct prestera_flow_block *block); + struct prestera_acl_rule * -prestera_acl_rule_create(struct prestera_flow_block *block, +prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, unsigned long cookie); -u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule); void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, u32 priority); -u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule); -struct list_head * -prestera_acl_rule_action_list_get(struct prestera_acl_rule 
*rule); -u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule); -u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule); -int prestera_acl_rule_action_add(struct prestera_acl_rule *rule, - struct prestera_acl_rule_action_entry *entry); -struct list_head * -prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule); -int prestera_acl_rule_match_add(struct prestera_acl_rule *rule, - struct prestera_acl_rule_match_entry *entry); void prestera_acl_rule_destroy(struct prestera_acl_rule *rule); struct prestera_acl_rule * prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, @@ -117,8 +133,39 @@ int prestera_acl_rule_add(struct prestera_switch *sw, struct prestera_acl_rule *rule); void prestera_acl_rule_del(struct prestera_switch *sw, struct prestera_acl_rule *rule); -int prestera_acl_rule_get_stats(struct prestera_switch *sw, +int prestera_acl_rule_get_stats(struct prestera_acl *acl, struct prestera_acl_rule *rule, u64 *packets, u64 *bytes, u64 *last_use); +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_find(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key); +void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, + struct prestera_acl_rule_entry *e); +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_create(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key, + struct prestera_acl_rule_entry_arg *arg); +struct prestera_acl_ruleset * +prestera_acl_ruleset_get(struct prestera_acl *acl, + struct prestera_flow_block *block); +struct prestera_acl_ruleset * +prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block); +void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask); +bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset); +int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset); +void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset); +int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port); +int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port); +void +prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, + u16 pcl_id); + +int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, + void *keymask, u32 *vtcam_id); +int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id); #endif /* _PRESTERA_ACL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c new file mode 100644 index 000000000000..4cd53a2dae46 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2021 Marvell International Ltd. 
All rights reserved */ + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_counter.h" + +#define COUNTER_POLL_TIME (msecs_to_jiffies(1000)) +#define COUNTER_RESCHED_TIME (msecs_to_jiffies(50)) +#define COUNTER_BULK_SIZE (256) + +struct prestera_counter { + struct prestera_switch *sw; + struct delayed_work stats_dw; + struct mutex mtx; /* protect block_list */ + struct prestera_counter_block **block_list; + u32 total_read; + u32 block_list_len; + u32 curr_idx; + bool is_fetching; +}; + +struct prestera_counter_block { + struct list_head list; + u32 id; + u32 offset; + u32 num_counters; + u32 client; + struct idr counter_idr; + refcount_t refcnt; + struct mutex mtx; /* protect stats and counter_idr */ + struct prestera_counter_stats *stats; + u8 *counter_flag; + bool is_updating; + bool full; +}; + +enum { + COUNTER_FLAG_READY = 0, + COUNTER_FLAG_INVALID = 1 +}; + +static bool +prestera_counter_is_ready(struct prestera_counter_block *block, u32 id) +{ + return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY; +} + +static void prestera_counter_lock(struct prestera_counter *counter) +{ + mutex_lock(&counter->mtx); +} + +static void prestera_counter_unlock(struct prestera_counter *counter) +{ + mutex_unlock(&counter->mtx); +} + +static void prestera_counter_block_lock(struct prestera_counter_block *block) +{ + mutex_lock(&block->mtx); +} + +static void prestera_counter_block_unlock(struct prestera_counter_block *block) +{ + mutex_unlock(&block->mtx); +} + +static bool prestera_counter_block_incref(struct prestera_counter_block *block) +{ + return refcount_inc_not_zero(&block->refcnt); +} + +static bool prestera_counter_block_decref(struct prestera_counter_block *block) +{ + return refcount_dec_and_test(&block->refcnt); +} + +/* must be called with prestera_counter_block_lock() */ +static void prestera_counter_stats_clear(struct prestera_counter_block *block, + u32 counter_id) +{ + memset(&block->stats[counter_id - block->offset], 0, + sizeof(*block->stats)); +} + +static struct prestera_counter_block * +prestera_counter_block_lookup_not_full(struct prestera_counter *counter, + u32 client) +{ + u32 i; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i] && + counter->block_list[i]->client == client && + !counter->block_list[i]->full && + prestera_counter_block_incref(counter->block_list[i])) { + prestera_counter_unlock(counter); + return counter->block_list[i]; + } + } + prestera_counter_unlock(counter); + + return NULL; +} + +static int prestera_counter_block_list_add(struct prestera_counter *counter, + struct prestera_counter_block *block) +{ + struct prestera_counter_block **arr; + u32 i; + + prestera_counter_lock(counter); + + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i]) + continue; + + counter->block_list[i] = block; + prestera_counter_unlock(counter); + return 0; + } + + arr = krealloc(counter->block_list, (counter->block_list_len + 1) * + sizeof(*counter->block_list), GFP_KERNEL); + if (!arr) { + prestera_counter_unlock(counter); + return -ENOMEM; + } + + counter->block_list = arr; + counter->block_list[counter->block_list_len] = block; + counter->block_list_len++; + prestera_counter_unlock(counter); + return 0; +} + +static struct prestera_counter_block * +prestera_counter_block_get(struct prestera_counter *counter, u32 client) +{ + struct prestera_counter_block *block; + int err; + + block = 
prestera_counter_block_lookup_not_full(counter, client); + if (block) + return block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return ERR_PTR(-ENOMEM); + + err = prestera_hw_counter_block_get(counter->sw, client, + &block->id, &block->offset, + &block->num_counters); + if (err) + goto err_block; + + block->stats = kcalloc(block->num_counters, + sizeof(*block->stats), GFP_KERNEL); + if (!block->stats) { + err = -ENOMEM; + goto err_stats; + } + + block->counter_flag = kcalloc(block->num_counters, + sizeof(*block->counter_flag), + GFP_KERNEL); + if (!block->counter_flag) { + err = -ENOMEM; + goto err_flag; + } + + block->client = client; + mutex_init(&block->mtx); + refcount_set(&block->refcnt, 1); + idr_init_base(&block->counter_idr, block->offset); + + err = prestera_counter_block_list_add(counter, block); + if (err) + goto err_list_add; + + return block; + +err_list_add: + idr_destroy(&block->counter_idr); + mutex_destroy(&block->mtx); + kfree(block->counter_flag); +err_flag: + kfree(block->stats); +err_stats: + prestera_hw_counter_block_release(counter->sw, block->id); +err_block: + kfree(block); + return ERR_PTR(err); +} + +static void prestera_counter_block_put(struct prestera_counter *counter, + struct prestera_counter_block *block) +{ + u32 i; + + if (!prestera_counter_block_decref(block)) + return; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i] && + counter->block_list[i]->id == block->id) { + counter->block_list[i] = NULL; + break; + } + } + prestera_counter_unlock(counter); + + WARN_ON(!idr_is_empty(&block->counter_idr)); + + prestera_hw_counter_block_release(counter->sw, block->id); + idr_destroy(&block->counter_idr); + mutex_destroy(&block->mtx); + kfree(block->stats); + kfree(block); +} + +static int prestera_counter_get_vacant(struct prestera_counter_block *block, + u32 *id) +{ + int free_id; + + if (block->full) + return -ENOSPC; + + prestera_counter_block_lock(block); + free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset, + block->offset + block->num_counters, + GFP_KERNEL); + if (free_id < 0) { + if (free_id == -ENOSPC) + block->full = true; + + prestera_counter_block_unlock(block); + return free_id; + } + *id = free_id; + prestera_counter_block_unlock(block); + + return 0; +} + +int prestera_counter_get(struct prestera_counter *counter, u32 client, + struct prestera_counter_block **bl, u32 *counter_id) +{ + struct prestera_counter_block *block; + int err; + u32 id; + +get_next_block: + block = prestera_counter_block_get(counter, client); + if (IS_ERR(block)) + return PTR_ERR(block); + + err = prestera_counter_get_vacant(block, &id); + if (err) { + prestera_counter_block_put(counter, block); + + if (err == -ENOSPC) + goto get_next_block; + + return err; + } + + prestera_counter_block_lock(block); + if (block->is_updating) + block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID; + prestera_counter_block_unlock(block); + + *counter_id = id; + *bl = block; + + return 0; +} + +void prestera_counter_put(struct prestera_counter *counter, + struct prestera_counter_block *block, u32 counter_id) +{ + if (!block) + return; + + prestera_counter_block_lock(block); + idr_remove(&block->counter_idr, counter_id); + block->full = false; + prestera_counter_stats_clear(block, counter_id); + prestera_counter_block_unlock(block); + + prestera_hw_counter_clear(counter->sw, block->id, counter_id); + prestera_counter_block_put(counter, block); +} + +static u32 
prestera_counter_block_idx_next(struct prestera_counter *counter,
+				u32 curr_idx)
+{
+	u32 idx, i, start = curr_idx + 1;
+
+	prestera_counter_lock(counter);
+	for (i = 0; i < counter->block_list_len; i++) {
+		idx = (start + i) % counter->block_list_len;
+		if (!counter->block_list[idx])
+			continue;
+
+		prestera_counter_unlock(counter);
+		return idx;
+	}
+	prestera_counter_unlock(counter);
+
+	return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
+{
+	if (idx >= counter->block_list_len)
+		return NULL;
+
+	prestera_counter_lock(counter);
+
+	if (!counter->block_list[idx] ||
+	    !prestera_counter_block_incref(counter->block_list[idx])) {
+		prestera_counter_unlock(counter);
+		return NULL;
+	}
+
+	prestera_counter_unlock(counter);
+	return counter->block_list[idx];
+}
+
+static void prestera_counter_stats_work(struct work_struct *work)
+{
+	struct delayed_work *dl_work =
+		container_of(work, struct delayed_work, work);
+	struct prestera_counter *counter =
+		container_of(dl_work, struct prestera_counter, stats_dw);
+	struct prestera_counter_block *block;
+	u32 resched_time = COUNTER_POLL_TIME;
+	u32 count = COUNTER_BULK_SIZE;
+	bool done = false;
+	int err;
+	u32 i;
+
+	block = prestera_counter_block_get_by_idx(counter, counter->curr_idx);
+	if (!block) {
+		if (counter->is_fetching)
+			goto abort;
+
+		goto next;
+	}
+
+	if (!counter->is_fetching) {
+		err = prestera_hw_counter_trigger(counter->sw, block->id);
+		if (err)
+			goto abort;
+
+		prestera_counter_block_lock(block);
+		block->is_updating = true;
+		prestera_counter_block_unlock(block);
+
+		counter->is_fetching = true;
+		counter->total_read = 0;
+		resched_time = COUNTER_RESCHED_TIME;
+		goto resched;
+	}
+
+	prestera_counter_block_lock(block);
+	err = prestera_hw_counters_get(counter->sw, counter->total_read,
+				       &count, &done,
+				       &block->stats[counter->total_read]);
+	prestera_counter_block_unlock(block);
+	if (err)
+		goto abort;
+
+	counter->total_read += count;
+	if (!done || counter->total_read < block->num_counters) {
+		resched_time = COUNTER_RESCHED_TIME;
+		goto resched;
+	}
+
+	for (i = 0; i < block->num_counters; i++) {
+		if (block->counter_flag[i] == COUNTER_FLAG_INVALID) {
+			prestera_counter_block_lock(block);
+			block->counter_flag[i] = COUNTER_FLAG_READY;
+			memset(&block->stats[i], 0, sizeof(*block->stats));
+			prestera_counter_block_unlock(block);
+		}
+	}
+
+	prestera_counter_block_lock(block);
+	block->is_updating = false;
+	prestera_counter_block_unlock(block);
+
+	goto next;
+abort:
+	prestera_hw_counter_abort(counter->sw);
+next:
+	counter->is_fetching = false;
+	counter->curr_idx =
+		prestera_counter_block_idx_next(counter, counter->curr_idx);
+resched:
+	if (block)
+		prestera_counter_block_put(counter, block);
+
+	schedule_delayed_work(&counter->stats_dw, resched_time);
+}
+
+/* Can be executed without rtnl_lock(),
+ * so take care when changing anything here.
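 * For example, the flower FLOW_CLS_STATS handler may read and clear a
 * counter via prestera_counter_stats_get() while the stats worker above
 * is still refreshing the block, which is why both sides take the
 * per-block mutex.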
+ */ +int prestera_counter_stats_get(struct prestera_counter *counter, + struct prestera_counter_block *block, + u32 counter_id, u64 *packets, u64 *bytes) +{ + if (!block || !prestera_counter_is_ready(block, counter_id)) { + *packets = 0; + *bytes = 0; + return 0; + } + + prestera_counter_block_lock(block); + *packets = block->stats[counter_id - block->offset].packets; + *bytes = block->stats[counter_id - block->offset].bytes; + + prestera_counter_stats_clear(block, counter_id); + prestera_counter_block_unlock(block); + + return 0; +} + +int prestera_counter_init(struct prestera_switch *sw) +{ + struct prestera_counter *counter; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return -ENOMEM; + + counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL); + if (!counter->block_list) { + kfree(counter); + return -ENOMEM; + } + + mutex_init(&counter->mtx); + counter->block_list_len = 1; + counter->sw = sw; + sw->counter = counter; + + INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work); + schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME); + + return 0; +} + +void prestera_counter_fini(struct prestera_switch *sw) +{ + struct prestera_counter *counter = sw->counter; + u32 i; + + cancel_delayed_work_sync(&counter->stats_dw); + + for (i = 0; i < counter->block_list_len; i++) + WARN_ON(counter->block_list[i]); + + mutex_destroy(&counter->mtx); + kfree(counter->block_list); + kfree(counter); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.h b/drivers/net/ethernet/marvell/prestera/prestera_counter.h new file mode 100644 index 000000000000..ad6b73907799 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2021 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_COUNTER_H_ +#define _PRESTERA_COUNTER_H_ + +#include <linux/types.h> + +struct prestera_counter_stats { + u64 packets; + u64 bytes; +}; + +struct prestera_switch; +struct prestera_counter; +struct prestera_counter_block; + +int prestera_counter_init(struct prestera_switch *sw); +void prestera_counter_fini(struct prestera_switch *sw); + +int prestera_counter_get(struct prestera_counter *counter, u32 client, + struct prestera_counter_block **block, + u32 *counter_id); +void prestera_counter_put(struct prestera_counter *counter, + struct prestera_counter_block *block, u32 counter_id); +int prestera_counter_stats_get(struct prestera_counter *counter, + struct prestera_counter_block *block, + u32 counter_id, u64 *packets, u64 *bytes); + +#endif /* _PRESTERA_COUNTER_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c index c9891e968259..d849f046ece7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c @@ -40,6 +40,11 @@ static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, return 0; case FLOW_CLS_STATS: return prestera_flower_stats(block, f); + case FLOW_CLS_TMPLT_CREATE: + return prestera_flower_tmplt_create(block, f); + case FLOW_CLS_TMPLT_DESTROY: + prestera_flower_tmplt_destroy(block, f); + return 0; default: return -EOPNOTSUPP; } @@ -60,11 +65,102 @@ static int prestera_flow_block_cb(enum tc_setup_type type, } } +static void prestera_flow_block_destroy(void *cb_priv) +{ + struct prestera_flow_block *block = cb_priv; + + prestera_flower_template_cleanup(block); + + WARN_ON(!list_empty(&block->binding_list)); + + kfree(block); +} + +static struct prestera_flow_block * +prestera_flow_block_create(struct prestera_switch *sw, struct net *net) +{ + struct prestera_flow_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + + INIT_LIST_HEAD(&block->binding_list); + block->net = net; + block->sw = sw; + + return block; +} + static void prestera_flow_block_release(void *cb_priv) { struct prestera_flow_block *block = cb_priv; - prestera_acl_block_destroy(block); + prestera_flow_block_destroy(block); +} + +static bool +prestera_flow_block_is_bound(const struct prestera_flow_block *block) +{ + return block->ruleset_zero; +} + +static struct prestera_flow_block_binding * +prestera_flow_block_lookup(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + if (binding->port == port) + return binding; + + return NULL; +} + +static int prestera_flow_block_bind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + int err; + + binding = kzalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return -ENOMEM; + + binding->span_id = PRESTERA_SPAN_INVALID_ID; + binding->port = port; + + if (prestera_flow_block_is_bound(block)) { + err = prestera_acl_ruleset_bind(block->ruleset_zero, port); + if (err) + goto err_ruleset_bind; + } + + list_add(&binding->list, &block->binding_list); + return 0; + +err_ruleset_bind: + kfree(binding); + return err; +} + +static int prestera_flow_block_unbind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + binding = prestera_flow_block_lookup(block, port); + if (!binding) + return -ENOENT; + + 
list_del(&binding->list); + + if (prestera_flow_block_is_bound(block)) + prestera_acl_ruleset_unbind(block->ruleset_zero, port); + + kfree(binding); + return 0; } static struct prestera_flow_block * @@ -78,7 +174,7 @@ prestera_flow_block_get(struct prestera_switch *sw, block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw); if (!block_cb) { - block = prestera_acl_block_create(sw, f->net); + block = prestera_flow_block_create(sw, f->net); if (!block) return ERR_PTR(-ENOMEM); @@ -86,7 +182,7 @@ prestera_flow_block_get(struct prestera_switch *sw, sw, block, prestera_flow_block_release); if (IS_ERR(block_cb)) { - prestera_acl_block_destroy(block); + prestera_flow_block_destroy(block); return ERR_CAST(block_cb); } @@ -110,7 +206,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block) return; flow_block_cb_free(block_cb); - prestera_acl_block_destroy(block); + prestera_flow_block_destroy(block); } static int prestera_setup_flow_block_bind(struct prestera_port *port, @@ -128,7 +224,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port, block_cb = block->block_cb; - err = prestera_acl_block_bind(block, port); + err = prestera_flow_block_bind(block, port); if (err) goto err_block_bind; @@ -162,7 +258,7 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port, prestera_span_destroy(block); - err = prestera_acl_block_unbind(block, port); + err = prestera_flow_block_unbind(block, port); if (err) goto error; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h index 467c7038cace..1ea5b745bf72 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h @@ -7,6 +7,24 @@ #include <net/flow_offload.h> struct prestera_port; +struct prestera_switch; +struct prestera_flower_template; + +struct prestera_flow_block_binding { + struct list_head list; + struct prestera_port *port; + int span_id; +}; + +struct prestera_flow_block { + struct list_head binding_list; + struct prestera_switch *sw; + struct net *net; + struct prestera_acl_ruleset *ruleset_zero; + struct flow_block_cb *block_cb; + struct prestera_flower_template *tmplt; + unsigned int rule_count; +}; int prestera_flow_block_setup(struct prestera_port *port, struct flow_block_offload *f); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index e571ba09ec08..19c1417fd05f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -3,42 +3,61 @@ #include "prestera.h" #include "prestera_acl.h" +#include "prestera_flow.h" #include "prestera_flower.h" +struct prestera_flower_template { + struct prestera_acl_ruleset *ruleset; +}; + +void prestera_flower_template_cleanup(struct prestera_flow_block *block) +{ + if (block->tmplt) { + /* put the reference to the ruleset kept in create */ + prestera_acl_ruleset_put(block->tmplt->ruleset); + kfree(block->tmplt); + block->tmplt = NULL; + return; + } +} + static int prestera_flower_parse_actions(struct prestera_flow_block *block, struct prestera_acl_rule *rule, struct flow_action *flow_action, struct netlink_ext_ack *extack) { - struct prestera_acl_rule_action_entry a_entry; const struct flow_action_entry *act; - int err, i; + int i; + /* whole struct (rule->re_arg) must be initialized with 0 */ if (!flow_action_has_entries(flow_action)) return 0; 
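	/* Each supported action below maps to a dedicated 'valid' flag in
	 * rule->re_arg, and a duplicate action of the same kind is rejected
	 * with -EEXIST. Illustrative sketch (not part of the patch): a flower
	 * rule with a single drop action leaves
	 *
	 *	rule->re_arg.drop.valid == 1
	 *
	 * and every other re_arg member zeroed, as the comment above requires
	 * of the caller. */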
flow_action_for_each(i, act, flow_action) { - memset(&a_entry, 0, sizeof(a_entry)); - switch (act->id) { case FLOW_ACTION_ACCEPT: - a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT; + if (rule->re_arg.accept.valid) + return -EEXIST; + + rule->re_arg.accept.valid = 1; break; case FLOW_ACTION_DROP: - a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP; + if (rule->re_arg.drop.valid) + return -EEXIST; + + rule->re_arg.drop.valid = 1; break; case FLOW_ACTION_TRAP: - a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP; + if (rule->re_arg.trap.valid) + return -EEXIST; + + rule->re_arg.trap.valid = 1; break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); pr_err("Unsupported action\n"); return -EOPNOTSUPP; } - - err = prestera_acl_rule_action_add(rule, &a_entry); - if (err) - return err; } return 0; @@ -47,12 +66,12 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block, static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, struct flow_cls_offload *f, struct prestera_flow_block *block) -{ - struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); - struct prestera_acl_rule_match_entry m_entry = {0}; +{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); + struct prestera_acl_match *r_match = &rule->re_key.match; + struct prestera_port *port; struct net_device *ingress_dev; struct flow_match_meta match; - struct prestera_port *port; + __be16 key, mask; flow_rule_match_meta(f_rule, &match); if (match.mask->ingress_ifindex != 0xFFFFFFFF) { @@ -61,7 +80,7 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, return -EINVAL; } - ingress_dev = __dev_get_by_index(prestera_acl_block_net(block), + ingress_dev = __dev_get_by_index(block->net, match.key->ingress_ifindex); if (!ingress_dev) { NL_SET_ERR_MSG_MOD(f->common.extack, @@ -76,22 +95,28 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, } port = netdev_priv(ingress_dev); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT; - m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32); - m_entry.keymask.u64.mask = ~(u64)0; + mask = htons(0x1FFF); + key = htons(port->hw_id); + rule_match_set(r_match->key, SYS_PORT, key); + rule_match_set(r_match->mask, SYS_PORT, mask); + + mask = htons(0x1FF); + key = htons(port->dev_id); + rule_match_set(r_match->key, SYS_DEV, key); + rule_match_set(r_match->mask, SYS_DEV, mask); + + return 0; - return prestera_acl_rule_match_add(rule, &m_entry); } static int prestera_flower_parse(struct prestera_flow_block *block, struct prestera_acl_rule *rule, struct flow_cls_offload *f) -{ - struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); +{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = f_rule->match.dissector; - struct prestera_acl_rule_match_entry m_entry; - u16 n_proto_mask = 0; - u16 n_proto_key = 0; + struct prestera_acl_match *r_match = &rule->re_key.match; + __be16 n_proto_mask = 0; + __be16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; @@ -129,32 +154,19 @@ static int prestera_flower_parse(struct prestera_flow_block *block, struct flow_match_basic match; flow_rule_match_basic(f_rule, &match); - n_proto_key = ntohs(match.key->n_proto); - n_proto_mask = ntohs(match.mask->n_proto); + n_proto_key = match.key->n_proto; + n_proto_mask = match.mask->n_proto; - if (n_proto_key == ETH_P_ALL) { + if (ntohs(match.key->n_proto) == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } - /* add eth type key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = 
PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE; - m_entry.keymask.u16.key = n_proto_key; - m_entry.keymask.u16.mask = n_proto_mask; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; - - /* add ip proto key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO; - m_entry.keymask.u8.key = match.key->ip_proto; - m_entry.keymask.u8.mask = match.mask->ip_proto; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, ETH_TYPE, n_proto_key); + rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask); + rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto); + rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto); ip_proto = match.key->ip_proto; } @@ -163,27 +175,27 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_eth_addrs(f_rule, &match); - /* add ethernet dst key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC; - memcpy(&m_entry.keymask.mac.key, - &match.key->dst, sizeof(match.key->dst)); - memcpy(&m_entry.keymask.mac.mask, - &match.mask->dst, sizeof(match.mask->dst)); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; - - /* add ethernet src key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC; - memcpy(&m_entry.keymask.mac.key, - &match.key->src, sizeof(match.key->src)); - memcpy(&m_entry.keymask.mac.mask, - &match.mask->src, sizeof(match.mask->src)); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + /* DA key, mask */ + rule_match_set_n(r_match->key, + ETH_DMAC_0, &match.key->dst[0], 4); + rule_match_set_n(r_match->key, + ETH_DMAC_1, &match.key->dst[4], 2); + + rule_match_set_n(r_match->mask, + ETH_DMAC_0, &match.mask->dst[0], 4); + rule_match_set_n(r_match->mask, + ETH_DMAC_1, &match.mask->dst[4], 2); + + /* SA key, mask */ + rule_match_set_n(r_match->key, + ETH_SMAC_0, &match.key->src[0], 4); + rule_match_set_n(r_match->key, + ETH_SMAC_1, &match.key->src[4], 2); + + rule_match_set_n(r_match->mask, + ETH_SMAC_0, &match.mask->src[0], 4); + rule_match_set_n(r_match->mask, + ETH_SMAC_1, &match.mask->src[4], 2); } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -191,25 +203,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_ipv4_addrs(f_rule, &match); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC; - memcpy(&m_entry.keymask.u32.key, - &match.key->src, sizeof(match.key->src)); - memcpy(&m_entry.keymask.u32.mask, - &match.mask->src, sizeof(match.mask->src)); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, IP_SRC, match.key->src); + rule_match_set(r_match->mask, IP_SRC, match.mask->src); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST; - memcpy(&m_entry.keymask.u32.key, - &match.key->dst, sizeof(match.key->dst)); - memcpy(&m_entry.keymask.u32.mask, - &match.mask->dst, sizeof(match.mask->dst)); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, IP_DST, match.key->dst); + rule_match_set(r_match->mask, IP_DST, match.mask->dst); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) { @@ -224,21 +222,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block, 
flow_rule_match_ports(f_rule, &match); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC; - m_entry.keymask.u16.key = ntohs(match.key->src); - m_entry.keymask.u16.mask = ntohs(match.mask->src); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, L4_PORT_SRC, match.key->src); + rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST; - m_entry.keymask.u16.key = ntohs(match.key->dst); - m_entry.keymask.u16.mask = ntohs(match.mask->dst); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, L4_PORT_DST, match.key->dst); + rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) { @@ -247,22 +235,15 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_vlan(f_rule, &match); if (match.mask->vlan_id != 0) { - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID; - m_entry.keymask.u16.key = match.key->vlan_id; - m_entry.keymask.u16.mask = match.mask->vlan_id; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + __be16 key = cpu_to_be16(match.key->vlan_id); + __be16 mask = cpu_to_be16(match.mask->vlan_id); + + rule_match_set(r_match->key, VLAN_ID, key); + rule_match_set(r_match->mask, VLAN_ID, mask); } - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID; - m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid); - m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid); + rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) { @@ -270,90 +251,164 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_icmp(f_rule, &match); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE; - m_entry.keymask.u8.key = match.key->type; - m_entry.keymask.u8.mask = match.mask->type; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, ICMP_TYPE, match.key->type); + rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE; - m_entry.keymask.u8.key = match.key->code; - m_entry.keymask.u8.mask = match.mask->code; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, ICMP_CODE, match.key->code); + rule_match_set(r_match->mask, ICMP_CODE, match.mask->code); } - return prestera_flower_parse_actions(block, rule, - &f->rule->action, + return prestera_flower_parse_actions(block, rule, &f->rule->action, f->common.extack); } int prestera_flower_replace(struct prestera_flow_block *block, struct flow_cls_offload *f) { - struct prestera_switch *sw = prestera_acl_block_sw(block); + struct prestera_acl_ruleset *ruleset; + struct prestera_acl *acl = block->sw->acl; struct prestera_acl_rule *rule; int err; - rule = prestera_acl_rule_create(block, f->cookie); - if (IS_ERR(rule)) - return PTR_ERR(rule); + ruleset = prestera_acl_ruleset_get(acl, block); 
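	/* Note: prestera_acl_ruleset_get() returns the ruleset with a
	 * reference held; every exit from this function, success or error,
	 * must drop it with prestera_acl_ruleset_put(). */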
+	if (IS_ERR(ruleset))
+		return PTR_ERR(ruleset);
+
+	/* increments the ruleset reference */
+	rule = prestera_acl_rule_create(ruleset, f->cookie);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		goto err_rule_create;
+	}
 	err = prestera_flower_parse(block, rule, f);
 	if (err)
-		goto err_flower_parse;
+		goto err_rule_add;
+
+	if (!prestera_acl_ruleset_is_offload(ruleset)) {
+		err = prestera_acl_ruleset_offload(ruleset);
+		if (err)
+			goto err_ruleset_offload;
+	}
 
-	err = prestera_acl_rule_add(sw, rule);
+	err = prestera_acl_rule_add(block->sw, rule);
 	if (err)
 		goto err_rule_add;
 
+	prestera_acl_ruleset_put(ruleset);
 	return 0;
 
+err_ruleset_offload:
 err_rule_add:
-err_flower_parse:
 	prestera_acl_rule_destroy(rule);
+err_rule_create:
+	prestera_acl_ruleset_put(ruleset);
 	return err;
 }
 
 void prestera_flower_destroy(struct prestera_flow_block *block,
 			     struct flow_cls_offload *f)
 {
+	struct prestera_acl_ruleset *ruleset;
 	struct prestera_acl_rule *rule;
-	struct prestera_switch *sw;
 
-	rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
-					f->cookie);
+	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+	if (IS_ERR(ruleset))
+		return;
+
+	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
 	if (rule) {
-		sw = prestera_acl_block_sw(block);
-		prestera_acl_rule_del(sw, rule);
+		prestera_acl_rule_del(block->sw, rule);
 		prestera_acl_rule_destroy(rule);
 	}
+	prestera_acl_ruleset_put(ruleset);
+
 }
 
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+				 struct flow_cls_offload *f)
+{
+	struct prestera_flower_template *template;
+	struct prestera_acl_ruleset *ruleset;
+	struct prestera_acl_rule rule;
+	int err;
+
+	memset(&rule, 0, sizeof(rule));
+	err = prestera_flower_parse(block, &rule, f);
+	if (err)
+		return err;
+
+	template = kmalloc(sizeof(*template), GFP_KERNEL);
+	if (!template) {
+		err = -ENOMEM;
+		goto err_malloc;
+	}
+
+	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
+	ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
+	if (IS_ERR_OR_NULL(ruleset)) {
+		err = -EINVAL;
+		goto err_ruleset_get;
+	}
+
+	/* preserve the keymask/template for this ruleset */
+	prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+
+	/* Skip the error: a template operation cannot be rejected, so keep
+	 * the reference to the ruleset for rules that will be added to it
+	 * later. If offload fails here, the ruleset is offloaded again when
+	 * a new rule is added. It is also unlikely, though possible, that
+	 * the ruleset is already offloaded at this stage.
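 * The template path is therefore best-effort: FLOW_CLS_TMPLT_CREATE pins
 * the ruleset in block->tmplt, and prestera_flower_tmplt_destroy() drops
 * it again via prestera_flower_template_cleanup().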
+ */ + prestera_acl_ruleset_offload(ruleset); + + /* keep the reference to the ruleset */ + template->ruleset = ruleset; + block->tmplt = template; + return 0; + +err_ruleset_get: + kfree(template); +err_malloc: + NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed"); + return err; +} + +void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + prestera_flower_template_cleanup(block); } int prestera_flower_stats(struct prestera_flow_block *block, struct flow_cls_offload *f) { - struct prestera_switch *sw = prestera_acl_block_sw(block); + struct prestera_acl_ruleset *ruleset; struct prestera_acl_rule *rule; u64 packets; u64 lastuse; u64 bytes; int err; - rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block), - f->cookie); - if (!rule) - return -EINVAL; + ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule = prestera_acl_rule_lookup(ruleset, f->cookie); + if (!rule) { + err = -EINVAL; + goto err_rule_get_stats; + } - err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse); + err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets, + &bytes, &lastuse); if (err) - return err; + goto err_rule_get_stats; flow_stats_update(&f->stats, bytes, packets, 0, lastuse, - FLOW_ACTION_HW_STATS_IMMEDIATE); - return 0; + FLOW_ACTION_HW_STATS_DELAYED); + +err_rule_get_stats: + prestera_acl_ruleset_put(ruleset); + return err; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h index 91e045eec58b..dc3aa4280e9f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h @@ -1,11 +1,12 @@ /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ -/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ +/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. 
*/ #ifndef _PRESTERA_FLOWER_H_ #define _PRESTERA_FLOWER_H_ #include <net/pkt_cls.h> +struct prestera_switch; struct prestera_flow_block; int prestera_flower_replace(struct prestera_flow_block *block, @@ -14,5 +15,10 @@ void prestera_flower_destroy(struct prestera_flow_block *block, struct flow_cls_offload *f); int prestera_flower_stats(struct prestera_flow_block *block, struct flow_cls_offload *f); +int prestera_flower_tmplt_create(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_template_cleanup(struct prestera_flow_block *block); #endif /* _PRESTERA_FLOWER_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index 9b8b1ed474fc..51fc841b1e7a 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -9,6 +9,7 @@ #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" +#include "prestera_counter.h" #define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000) @@ -38,13 +39,24 @@ enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403, - PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500, - PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501, - PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510, - PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520, - PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521, - PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530, - PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531, + PRESTERA_CMD_TYPE_COUNTER_GET = 0x510, + PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511, + PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512, + PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513, + PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514, + PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515, + + PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540, + PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541, + PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550, + PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551, + PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560, + PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561, + + PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600, + PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601, + PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630, + PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631, PRESTERA_CMD_TYPE_RXTX_INIT = 0x800, @@ -359,76 +371,84 @@ struct prestera_msg_bridge_resp { u8 pad[2]; }; -struct prestera_msg_acl_action { - __le32 id; - __le32 reserved[5]; +struct prestera_msg_vtcam_create_req { + struct prestera_msg_cmd cmd; + __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + u8 direction; + u8 lookup; + u8 pad[2]; }; -struct prestera_msg_acl_match { - __le32 type; - __le32 __reserved; - union { - struct { - u8 key; - u8 mask; - } u8; - struct { - __le16 key; - __le16 mask; - } u16; - struct { - __le32 key; - __le32 mask; - } u32; - struct { - __le64 key; - __le64 mask; - } u64; - struct { - u8 key[ETH_ALEN]; - u8 mask[ETH_ALEN]; - } mac; - } keymask; +struct prestera_msg_vtcam_destroy_req { + struct prestera_msg_cmd cmd; + __le32 vtcam_id; }; -struct prestera_msg_acl_rule_req { +struct prestera_msg_vtcam_rule_add_req { struct prestera_msg_cmd cmd; - __le32 id; - __le32 priority; - __le16 ruleset_id; - u8 n_actions; - u8 n_matches; + __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 vtcam_id; + __le32 prio; + __le32 n_act; }; -struct prestera_msg_acl_rule_resp { - struct prestera_msg_ret ret; +struct 
prestera_msg_vtcam_rule_del_req { + struct prestera_msg_cmd cmd; + __le32 vtcam_id; __le32 id; }; -struct prestera_msg_acl_rule_stats_resp { +struct prestera_msg_vtcam_bind_req { + struct prestera_msg_cmd cmd; + union { + struct { + __le32 hw_id; + __le32 dev_id; + } port; + __le32 index; + }; + __le32 vtcam_id; + __le16 pcl_id; + __le16 type; +}; + +struct prestera_msg_vtcam_resp { struct prestera_msg_ret ret; - __le64 packets; - __le64 bytes; + __le32 vtcam_id; + __le32 rule_id; }; -struct prestera_msg_acl_ruleset_bind_req { - struct prestera_msg_cmd cmd; - __le32 port; - __le32 dev; - __le16 ruleset_id; - u8 pad[2]; +struct prestera_msg_acl_action { + __le32 id; + __le32 __reserved; + union { + struct { + __le32 id; + } count; + __le32 reserved[6]; + }; }; -struct prestera_msg_acl_ruleset_req { +struct prestera_msg_counter_req { struct prestera_msg_cmd cmd; - __le16 id; - u8 pad[2]; + __le32 client; + __le32 block_id; + __le32 num_counters; +}; + +struct prestera_msg_counter_stats { + __le64 packets; + __le64 bytes; }; -struct prestera_msg_acl_ruleset_resp { +struct prestera_msg_counter_resp { struct prestera_msg_ret ret; - __le16 id; - u8 pad[2]; + __le32 block_id; + __le32 offset; + __le32 num_counters; + __le32 done; + struct prestera_msg_counter_stats stats[]; }; struct prestera_msg_span_req { @@ -465,6 +485,48 @@ struct prestera_msg_rxtx_resp { __le32 map_addr; }; +struct prestera_msg_iface { + union { + struct { + __le32 dev; + __le32 port; + }; + __le16 lag_id; + }; + __le16 vr_id; + __le16 vid; + u8 type; + u8 __pad[3]; +}; + +struct prestera_msg_rif_req { + struct prestera_msg_cmd cmd; + struct prestera_msg_iface iif; + __le32 mtu; + __le16 rif_id; + __le16 __reserved; + u8 mac[ETH_ALEN]; + u8 __pad[2]; +}; + +struct prestera_msg_rif_resp { + struct prestera_msg_ret ret; + __le16 rif_id; + u8 __pad[2]; +}; + +struct prestera_msg_vr_req { + struct prestera_msg_cmd cmd; + __le16 vr_id; + u8 __pad[2]; +}; + +struct prestera_msg_vr_resp { + struct prestera_msg_ret ret; + __le16 vr_id; + u8 __pad[2]; +}; + struct prestera_msg_lag_req { struct prestera_msg_cmd cmd; __le32 port; @@ -521,14 +583,24 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28); BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20); + BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32); + BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36); + BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8); + + /* structure that are part of req/resp fw 
messages */ + BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16); /* check responses */ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8); @@ -537,11 +609,12 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248); BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20); BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24); + BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12); /* check events */ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); @@ -1044,225 +1117,162 @@ static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause) } } -int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id) +int prestera_hw_vtcam_create(struct prestera_switch *sw, + u8 lookup, const u32 *keymask, u32 *vtcam_id, + enum prestera_hw_vtcam_direction_t dir) { - struct prestera_msg_acl_ruleset_resp resp; - struct prestera_msg_acl_ruleset_req req; int err; + struct prestera_msg_vtcam_resp resp; + struct prestera_msg_vtcam_create_req req = { + .lookup = lookup, + .direction = dir, + }; + + if (keymask) + memcpy(req.keymask, keymask, sizeof(req.keymask)); + else + memset(req.keymask, 0, sizeof(req.keymask)); - err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE, + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; - *ruleset_id = __le16_to_cpu(resp.id); - + *vtcam_id = __le32_to_cpu(resp.vtcam_id); return 0; } -int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id) +int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id) { - struct prestera_msg_acl_ruleset_req req = { - .id = __cpu_to_le16(ruleset_id), + struct prestera_msg_vtcam_destroy_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), }; - return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE, + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY, &req.cmd, sizeof(req)); } -static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action, - struct prestera_acl_rule *rule) +static int +prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action, + struct prestera_acl_hw_action_info *info) { - struct list_head *a_list = prestera_acl_rule_action_list_get(rule); - struct prestera_acl_rule_action_entry *a_entry; - int i = 0; - - list_for_each_entry(a_entry, a_list, list) { - action[i].id = __cpu_to_le32(a_entry->id); - - switch (a_entry->id) { - case PRESTERA_ACL_RULE_ACTION_ACCEPT: - case PRESTERA_ACL_RULE_ACTION_DROP: - case PRESTERA_ACL_RULE_ACTION_TRAP: - /* just rule action id, no specific data */ - break; - default: - return -EINVAL; - } + action->id = __cpu_to_le32(info->id); - i++; - } - - return 0; -} - -static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match, - struct prestera_acl_rule *rule) -{ - struct list_head *m_list = prestera_acl_rule_match_list_get(rule); - struct prestera_acl_rule_match_entry *m_entry; - int i = 0; - - 
list_for_each_entry(m_entry, m_list, list) { - match[i].type = __cpu_to_le32(m_entry->type); - - switch (m_entry->type) { - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID: - match[i].keymask.u16.key = - __cpu_to_le16(m_entry->keymask.u16.key); - match[i].keymask.u16.mask = - __cpu_to_le16(m_entry->keymask.u16.mask); - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO: - match[i].keymask.u8.key = m_entry->keymask.u8.key; - match[i].keymask.u8.mask = m_entry->keymask.u8.mask; - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC: - memcpy(match[i].keymask.mac.key, - m_entry->keymask.mac.key, - sizeof(match[i].keymask.mac.key)); - memcpy(match[i].keymask.mac.mask, - m_entry->keymask.mac.mask, - sizeof(match[i].keymask.mac.mask)); - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST: - match[i].keymask.u32.key = - __cpu_to_le32(m_entry->keymask.u32.key); - match[i].keymask.u32.mask = - __cpu_to_le32(m_entry->keymask.u32.mask); - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT: - match[i].keymask.u64.key = - __cpu_to_le64(m_entry->keymask.u64.key); - match[i].keymask.u64.mask = - __cpu_to_le64(m_entry->keymask.u64.mask); - break; - default: - return -EINVAL; - } - - i++; + switch (info->id) { + case PRESTERA_ACL_RULE_ACTION_ACCEPT: + case PRESTERA_ACL_RULE_ACTION_DROP: + case PRESTERA_ACL_RULE_ACTION_TRAP: + /* just rule action id, no specific data */ + break; + case PRESTERA_ACL_RULE_ACTION_COUNT: + action->count.id = __cpu_to_le32(info->count.id); + break; + default: + return -EINVAL; } return 0; } -int prestera_hw_acl_rule_add(struct prestera_switch *sw, - struct prestera_acl_rule *rule, - u32 *rule_id) +int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, + u32 vtcam_id, u32 prio, void *key, void *keymask, + struct prestera_acl_hw_action_info *act, + u8 n_act, u32 *rule_id) { - struct prestera_msg_acl_action *actions; - struct prestera_msg_acl_match *matches; - struct prestera_msg_acl_rule_resp resp; - struct prestera_msg_acl_rule_req *req; - u8 n_actions; - u8 n_matches; + struct prestera_msg_acl_action *actions_msg; + struct prestera_msg_vtcam_rule_add_req *req; + struct prestera_msg_vtcam_resp resp; void *buff; u32 size; int err; + u8 i; - n_actions = prestera_acl_rule_action_len(rule); - n_matches = prestera_acl_rule_match_len(rule); - - size = sizeof(*req) + sizeof(*actions) * n_actions + - sizeof(*matches) * n_matches; + size = sizeof(*req) + sizeof(*actions_msg) * n_act; buff = kzalloc(size, GFP_KERNEL); if (!buff) return -ENOMEM; req = buff; - actions = buff + sizeof(*req); - matches = buff + sizeof(*req) + sizeof(*actions) * n_actions; - - /* put acl actions into the message */ - err = prestera_hw_acl_actions_put(actions, rule); - if (err) - goto free_buff; + req->n_act = __cpu_to_le32(n_act); + actions_msg = buff + sizeof(*req); /* put acl matches into the message */ - err = prestera_hw_acl_matches_put(matches, rule); - if (err) - goto free_buff; + memcpy(req->key, key, sizeof(req->key)); + memcpy(req->keymask, 
keymask, sizeof(req->keymask)); - req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule)); - req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule)); - req->n_actions = prestera_acl_rule_action_len(rule); - req->n_matches = prestera_acl_rule_match_len(rule); + /* put acl actions into the message */ + for (i = 0; i < n_act; i++) { + err = prestera_acl_rule_add_put_action(&actions_msg[i], + &act[i]); + if (err) + goto free_buff; + } - err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD, + req->vtcam_id = __cpu_to_le32(vtcam_id); + req->prio = __cpu_to_le32(prio); + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD, &req->cmd, size, &resp.ret, sizeof(resp)); if (err) goto free_buff; - *rule_id = __le32_to_cpu(resp.id); + *rule_id = __le32_to_cpu(resp.rule_id); free_buff: kfree(buff); return err; } -int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id) +int prestera_hw_vtcam_rule_del(struct prestera_switch *sw, + u32 vtcam_id, u32 rule_id) { - struct prestera_msg_acl_rule_req req = { + struct prestera_msg_vtcam_rule_del_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), .id = __cpu_to_le32(rule_id) }; - return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE, + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE, &req.cmd, sizeof(req)); } -int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id, - u64 *packets, u64 *bytes) +int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id, u16 pcl_id) { - struct prestera_msg_acl_rule_stats_resp resp; - struct prestera_msg_acl_rule_req req = { - .id = __cpu_to_le32(rule_id) + struct prestera_msg_vtcam_bind_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), + .type = __cpu_to_le16(iface->type), + .pcl_id = __cpu_to_le16(pcl_id) }; - int err; - - err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - *packets = __le64_to_cpu(resp.packets); - *bytes = __le64_to_cpu(resp.bytes); - - return 0; -} -int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id) -{ - struct prestera_msg_acl_ruleset_bind_req req = { - .port = __cpu_to_le32(port->hw_id), - .dev = __cpu_to_le32(port->dev_id), - .ruleset_id = __cpu_to_le16(ruleset_id), - }; + if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { + req.port.dev_id = __cpu_to_le32(iface->port->dev_id); + req.port.hw_id = __cpu_to_le32(iface->port->hw_id); + } else { + req.index = __cpu_to_le32(iface->index); + } - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND, + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND, &req.cmd, sizeof(req)); } -int prestera_hw_acl_port_unbind(const struct prestera_port *port, - u16 ruleset_id) +int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id) { - struct prestera_msg_acl_ruleset_bind_req req = { - .port = __cpu_to_le32(port->hw_id), - .dev = __cpu_to_le32(port->dev_id), - .ruleset_id = __cpu_to_le16(ruleset_id), + struct prestera_msg_vtcam_bind_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), + .type = __cpu_to_le16(iface->type) }; - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND, + if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { + req.port.dev_id = __cpu_to_le32(iface->port->dev_id); + req.port.hw_id = __cpu_to_le32(iface->port->hw_id); + } else { + req.index = __cpu_to_le32(iface->index); + } + + return prestera_cmd(sw, 
PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND, &req.cmd, sizeof(req)); } @@ -1796,6 +1806,91 @@ int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id) &req.cmd, sizeof(req)); } +static int prestera_iface_to_msg(struct prestera_iface *iface, + struct prestera_msg_iface *msg_if) +{ + switch (iface->type) { + case PRESTERA_IF_PORT_E: + case PRESTERA_IF_VID_E: + msg_if->port = __cpu_to_le32(iface->dev_port.port_num); + msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num); + break; + case PRESTERA_IF_LAG_E: + msg_if->lag_id = __cpu_to_le16(iface->lag_id); + break; + default: + return -EOPNOTSUPP; + } + + msg_if->vr_id = __cpu_to_le16(iface->vr_id); + msg_if->vid = __cpu_to_le16(iface->vlan_id); + msg_if->type = iface->type; + return 0; +} + +int prestera_hw_rif_create(struct prestera_switch *sw, + struct prestera_iface *iif, u8 *mac, u16 *rif_id) +{ + struct prestera_msg_rif_req req; + struct prestera_msg_rif_resp resp; + int err; + + memcpy(req.mac, mac, ETH_ALEN); + + err = prestera_iface_to_msg(iif, &req.iif); + if (err) + return err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *rif_id = __le16_to_cpu(resp.rif_id); + return err; +} + +int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, + struct prestera_iface *iif) +{ + struct prestera_msg_rif_req req = { + .rif_id = __cpu_to_le16(rif_id), + }; + int err; + + err = prestera_iface_to_msg(iif, &req.iif); + if (err) + return err; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd, + sizeof(req)); +} + +int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id) +{ + int err; + struct prestera_msg_vr_resp resp; + struct prestera_msg_vr_req req; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *vr_id = __le16_to_cpu(resp.vr_id); + return err; +} + +int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id) +{ + struct prestera_msg_vr_req req = { + .vr_id = __cpu_to_le16(vr_id), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd, + sizeof(req)); +} + int prestera_hw_rxtx_init(struct prestera_switch *sw, struct prestera_rxtx_params *params) { @@ -1916,3 +2011,100 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw, list_del_rcu(&eh->list); kfree_rcu(eh, rcu); } + +int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counter_abort(struct prestera_switch *sw) +{ + struct prestera_msg_counter_req req; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, + u32 *len, bool *done, + struct prestera_counter_stats *stats) +{ + struct prestera_msg_counter_resp *resp; + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(idx), + .num_counters = __cpu_to_le32(*len), + }; + size_t size = struct_size(resp, stats, *len); + int err, i; + + resp = kmalloc(size, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET, + &req.cmd, sizeof(req), &resp->ret, size); + if (err) + goto free_buff; + + for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) { + stats[i].packets += 
__le64_to_cpu(resp->stats[i].packets); + stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes); + } + + *len = __le32_to_cpu(resp->num_counters); + *done = __le32_to_cpu(resp->done); + +free_buff: + kfree(resp); + return err; +} + +int prestera_hw_counter_block_get(struct prestera_switch *sw, + u32 client, u32 *block_id, u32 *offset, + u32 *num_counters) +{ + struct prestera_msg_counter_resp resp; + struct prestera_msg_counter_req req = { + .client = __cpu_to_le32(client) + }; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *block_id = __le32_to_cpu(resp.block_id); + *offset = __le32_to_cpu(resp.offset); + *num_counters = __le32_to_cpu(resp.num_counters); + + return 0; +} + +int prestera_hw_counter_block_release(struct prestera_switch *sw, + u32 block_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, + u32 counter_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id), + .num_counters = __cpu_to_le32(counter_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR, + &req.cmd, sizeof(req)); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index 57a3c2e5b112..3ff12bae5909 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -5,6 +5,7 @@ #define _PRESTERA_HW_H_ #include <linux/types.h> +#include "prestera_acl.h" enum prestera_accept_frm_type { PRESTERA_ACCEPT_FRAME_TYPE_TAGGED, @@ -111,18 +112,32 @@ enum prestera_hw_cpu_code_cnt_t { PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1, }; +enum prestera_hw_vtcam_direction_t { + PRESTERA_HW_VTCAM_DIR_INGRESS = 0, + PRESTERA_HW_VTCAM_DIR_EGRESS = 1, +}; + +enum { + PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0, + PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1, + PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2, +}; + struct prestera_switch; struct prestera_port; struct prestera_port_stats; struct prestera_port_caps; enum prestera_event_type; struct prestera_event; -struct prestera_acl_rule; typedef void (*prestera_event_cb_t) (struct prestera_switch *sw, struct prestera_event *evt, void *arg); struct prestera_rxtx_params; +struct prestera_acl_hw_action_info; +struct prestera_acl_iface; +struct prestera_counter_stats; +struct prestera_iface; /* Switch API */ int prestera_hw_switch_init(struct prestera_switch *sw); @@ -186,21 +201,37 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id); int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id); int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id); -/* ACL API */ -int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, - u16 *ruleset_id); -int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, - u16 ruleset_id); -int prestera_hw_acl_rule_add(struct prestera_switch *sw, - struct prestera_acl_rule *rule, - u32 *rule_id); -int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id); -int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, - u32 rule_id, u64 *packets, u64 *bytes); -int prestera_hw_acl_port_bind(const struct prestera_port *port, - u16 ruleset_id); -int prestera_hw_acl_port_unbind(const struct prestera_port *port, - u16 
ruleset_id); +/* vTCAM API */ +int prestera_hw_vtcam_create(struct prestera_switch *sw, + u8 lookup, const u32 *keymask, u32 *vtcam_id, + enum prestera_hw_vtcam_direction_t direction); +int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id, + u32 prio, void *key, void *keymask, + struct prestera_acl_hw_action_info *act, + u8 n_act, u32 *rule_id); +int prestera_hw_vtcam_rule_del(struct prestera_switch *sw, + u32 vtcam_id, u32 rule_id); +int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id); +int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id, u16 pcl_id); +int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id); + +/* Counter API */ +int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id); +int prestera_hw_counter_abort(struct prestera_switch *sw); +int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, + u32 *len, bool *done, + struct prestera_counter_stats *stats); +int prestera_hw_counter_block_get(struct prestera_switch *sw, + u32 client, u32 *block_id, u32 *offset, + u32 *num_counters); +int prestera_hw_counter_block_release(struct prestera_switch *sw, + u32 block_id); +int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, + u32 counter_id); /* SPAN API */ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id); @@ -208,6 +239,16 @@ int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id); int prestera_hw_span_unbind(const struct prestera_port *port); int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id); +/* Router API */ +int prestera_hw_rif_create(struct prestera_switch *sw, + struct prestera_iface *iif, u8 *mac, u16 *rif_id); +int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, + struct prestera_iface *iif); + +/* Virtual Router API */ +int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id); +int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id); + /* Event handlers */ int prestera_hw_event_handler_register(struct prestera_switch *sw, enum prestera_event_type type, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 4369a3ffad45..08fdd1e50388 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -18,6 +18,7 @@ #include "prestera_rxtx.h" #include "prestera_devlink.h" #include "prestera_ethtool.h" +#include "prestera_counter.h" #include "prestera_switchdev.h" #define PRESTERA_MTU_DEFAULT 1536 @@ -54,12 +55,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid) struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, u32 dev_id, u32 hw_id) { - struct prestera_port *port = NULL; + struct prestera_port *port = NULL, *tmp; read_lock(&sw->port_list_lock); - list_for_each_entry(port, &sw->port_list, list) { - if (port->dev_id == dev_id && port->hw_id == hw_id) + list_for_each_entry(tmp, &sw->port_list, list) { + if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) { + port = tmp; break; + } } read_unlock(&sw->port_list_lock); @@ -68,12 +71,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id) { - struct prestera_port *port = NULL; + struct prestera_port *port = NULL, *tmp; read_lock(&sw->port_list_lock); - 
list_for_each_entry(port, &sw->port_list, list) { - if (port->id == id) + list_for_each_entry(tmp, &sw->port_list, list) { + if (tmp->id == id) { + port = tmp; break; + } } read_unlock(&sw->port_list_lock); @@ -158,7 +163,7 @@ static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, return prestera_rxtx_xmit(netdev_priv(dev), skb); } -static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr) +int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr) { if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; @@ -764,23 +769,27 @@ static int prestera_netdev_port_event(struct net_device *lower, struct net_device *dev, unsigned long event, void *ptr) { - struct netdev_notifier_changeupper_info *info = ptr; + struct netdev_notifier_info *info = ptr; + struct netdev_notifier_changeupper_info *cu_info; struct prestera_port *port = netdev_priv(dev); struct netlink_ext_ack *extack; struct net_device *upper; - extack = netdev_notifier_info_to_extack(&info->info); - upper = info->upper_dev; + extack = netdev_notifier_info_to_extack(info); + cu_info = container_of(info, + struct netdev_notifier_changeupper_info, + info); switch (event) { case NETDEV_PRECHANGEUPPER: + upper = cu_info->upper_dev; if (!netif_is_bridge_master(upper) && !netif_is_lag_master(upper)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } - if (!info->linking) + if (!cu_info->linking) break; if (netdev_has_any_upper_dev(upper)) { @@ -789,7 +798,7 @@ static int prestera_netdev_port_event(struct net_device *lower, } if (netif_is_lag_master(upper) && - !prestera_lag_master_check(upper, info->upper_info, extack)) + !prestera_lag_master_check(upper, cu_info->upper_info, extack)) return -EOPNOTSUPP; if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) { NL_SET_ERR_MSG_MOD(extack, @@ -805,14 +814,15 @@ static int prestera_netdev_port_event(struct net_device *lower, break; case NETDEV_CHANGEUPPER: + upper = cu_info->upper_dev; if (netif_is_bridge_master(upper)) { - if (info->linking) + if (cu_info->linking) return prestera_bridge_port_join(upper, port, extack); else prestera_bridge_port_leave(upper, port); } else if (netif_is_lag_master(upper)) { - if (info->linking) + if (cu_info->linking) return prestera_lag_port_add(port, upper); else prestera_lag_port_del(port); @@ -892,6 +902,10 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) return err; + err = prestera_router_init(sw); + if (err) + goto err_router_init; + err = prestera_switchdev_init(sw); if (err) goto err_swdev_register; @@ -904,6 +918,10 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_handlers_register; + err = prestera_counter_init(sw); + if (err) + goto err_counter_init; + err = prestera_acl_init(sw); if (err) goto err_acl_init; @@ -936,12 +954,16 @@ err_dl_register: err_span_init: prestera_acl_fini(sw); err_acl_init: + prestera_counter_fini(sw); +err_counter_init: prestera_event_handlers_unregister(sw); err_handlers_register: prestera_rxtx_switch_fini(sw); err_rxtx_register: prestera_switchdev_fini(sw); err_swdev_register: + prestera_router_fini(sw); +err_router_init: prestera_netdev_event_handler_unregister(sw); prestera_hw_switch_fini(sw); @@ -956,6 +978,7 @@ static void prestera_switch_fini(struct prestera_switch *sw) prestera_devlink_traps_unregister(sw); prestera_span_fini(sw); prestera_acl_fini(sw); + prestera_counter_fini(sw); prestera_event_handlers_unregister(sw); prestera_rxtx_switch_fini(sw); prestera_switchdev_fini(sw); diff --git 
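The two prestera_main.c hunks above fix a subtle list_for_each_entry() misuse: if the loop terminates without hitting break, the cursor does not point at a real element but at memory computed from the list head itself, so returning it as the "not found" result hands back garbage. The safe shape, reduced to a standalone sketch with a hypothetical entry type:

#include <linux/list.h>
#include <linux/types.h>

struct example_port {
        struct list_head list;
        u32 id;
};

/* Search with a dedicated cursor and publish it through a separate
 * result pointer only once a match is confirmed. If the loop runs to
 * completion, "found" stays NULL instead of aliasing the list head.
 */
static struct example_port *example_find(struct list_head *head, u32 id)
{
        struct example_port *found = NULL, *tmp;

        list_for_each_entry(tmp, head, list) {
                if (tmp->id == id) {
                        found = tmp;
                        break;
                }
        }

        return found;
}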
a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c new file mode 100644 index 000000000000..8a3b7b664358 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/inetdevice.h> +#include <net/switchdev.h> + +#include "prestera.h" +#include "prestera_router_hw.h" + +/* This util to be used, to convert kernel rules for default vr in hw_vr */ +static u32 prestera_fix_tb_id(u32 tb_id) +{ + if (tb_id == RT_TABLE_UNSPEC || + tb_id == RT_TABLE_LOCAL || + tb_id == RT_TABLE_DEFAULT) + tb_id = RT_TABLE_MAIN; + + return tb_id; +} + +static int __prestera_inetaddr_port_event(struct net_device *port_dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + struct prestera_port *port = netdev_priv(port_dev); + int err; + struct prestera_rif_entry *re; + struct prestera_rif_entry_key re_key = {}; + u32 kern_tb_id; + + err = prestera_is_valid_mac_addr(port, port_dev->dev_addr); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix"); + return err; + } + + kern_tb_id = l3mdev_fib_table(port_dev); + re_key.iface.type = PRESTERA_IF_PORT_E; + re_key.iface.dev_port.hw_dev_num = port->dev_id; + re_key.iface.dev_port.port_num = port->hw_id; + re = prestera_rif_entry_find(port->sw, &re_key); + + switch (event) { + case NETDEV_UP: + if (re) { + NL_SET_ERR_MSG_MOD(extack, "rif_entry already exist"); + return -EEXIST; + } + re = prestera_rif_entry_create(port->sw, &re_key, + prestera_fix_tb_id(kern_tb_id), + port_dev->dev_addr); + if (!re) { + NL_SET_ERR_MSG_MOD(extack, "Can't create rif_entry"); + return -EINVAL; + } + dev_hold(port_dev); + break; + case NETDEV_DOWN: + if (!re) { + NL_SET_ERR_MSG_MOD(extack, "rif_entry not exist"); + return -EEXIST; + } + prestera_rif_entry_destroy(port->sw, re); + dev_put(port_dev); + break; + } + + return 0; +} + +static int __prestera_inetaddr_event(struct prestera_switch *sw, + struct net_device *dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + if (prestera_netdev_check(dev) && !netif_is_bridge_port(dev) && + !netif_is_lag_port(dev) && !netif_is_ovs_port(dev)) + return __prestera_inetaddr_port_event(dev, event, extack); + + return 0; +} + +static int __prestera_inetaddr_cb(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct prestera_router *router = container_of(nb, + struct prestera_router, + inetaddr_nb); + struct in_device *idev; + int err = 0; + + if (event != NETDEV_DOWN) + goto out; + + /* Ignore if this is not latest address */ + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + goto out; + + err = __prestera_inetaddr_event(router->sw, dev, event, NULL); +out: + return notifier_from_errno(err); +} + +static int __prestera_inetaddr_valid_cb(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_validator_info *ivi = (struct in_validator_info *)ptr; + struct net_device *dev = ivi->ivi_dev->dev; + struct prestera_router *router = container_of(nb, + struct prestera_router, + inetaddr_valid_nb); + struct in_device *idev; + int err = 0; + + if (event != NETDEV_UP) + goto out; + + /* Ignore if this is not first address */ + idev = __in_dev_get_rtnl(dev); + if (idev && 
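In the new prestera_router.c above, NETDEV_UP creates the HW RIF entry and pins the port netdev with dev_hold(), and NETDEV_DOWN destroys the entry and releases the reference with dev_put(). A stripped-down sketch of that pairing; the single-entry bookkeeping and the example_* names are illustrative only:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct example_rif {
        struct net_device *dev;
};

static struct example_rif *example_rif; /* at most one RIF in this sketch */

static int example_rif_event(struct net_device *dev, unsigned long event)
{
        switch (event) {
        case NETDEV_UP:
                if (example_rif)
                        return -EEXIST;
                example_rif = kzalloc(sizeof(*example_rif), GFP_KERNEL);
                if (!example_rif)
                        return -ENOMEM;
                example_rif->dev = dev;
                dev_hold(dev); /* pin the netdev while HW state references it */
                break;
        case NETDEV_DOWN:
                if (!example_rif)
                        return -ENOENT;
                dev_put(example_rif->dev); /* strictly paired with the hold */
                kfree(example_rif);
                example_rif = NULL;
                break;
        }

        return 0;
}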
idev->ifa_list) + goto out; + + if (ipv4_is_multicast(ivi->ivi_addr)) { + err = -EINVAL; + goto out; + } + + err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack); +out: + return notifier_from_errno(err); +} + +int prestera_router_init(struct prestera_switch *sw) +{ + struct prestera_router *router; + int err; + + router = kzalloc(sizeof(*sw->router), GFP_KERNEL); + if (!router) + return -ENOMEM; + + sw->router = router; + router->sw = sw; + + err = prestera_router_hw_init(sw); + if (err) + goto err_router_lib_init; + + router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb; + err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb); + if (err) + goto err_register_inetaddr_validator_notifier; + + router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb; + err = register_inetaddr_notifier(&router->inetaddr_nb); + if (err) + goto err_register_inetaddr_notifier; + + return 0; + +err_register_inetaddr_notifier: + unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb); +err_register_inetaddr_validator_notifier: + /* prestera_router_hw_fini */ +err_router_lib_init: + kfree(sw->router); + return err; +} + +void prestera_router_fini(struct prestera_switch *sw) +{ + unregister_inetaddr_notifier(&sw->router->inetaddr_nb); + unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); + /* router_hw_fini */ + kfree(sw->router); + sw->router = NULL; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c new file mode 100644 index 000000000000..5866a4be50f5 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ + +#include <linux/rhashtable.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_router_hw.h" +#include "prestera_acl.h" + +/* +--+ + * +------->|vr| + * | +--+ + * | + * +-+-------+ + * |rif_entry| + * +---------+ + * Rif is + * used as + * entry point + * for vr in hw + */ + +int prestera_router_hw_init(struct prestera_switch *sw) +{ + INIT_LIST_HEAD(&sw->router->vr_list); + INIT_LIST_HEAD(&sw->router->rif_entry_list); + + return 0; +} + +static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw, + u32 tb_id) +{ + struct prestera_vr *vr; + + list_for_each_entry(vr, &sw->router->vr_list, router_node) { + if (vr->tb_id == tb_id) + return vr; + } + + return NULL; +} + +static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw, + u32 tb_id, + struct netlink_ext_ack *extack) +{ + struct prestera_vr *vr; + u16 hw_vr_id; + int err; + + err = prestera_hw_vr_create(sw, &hw_vr_id); + if (err) + return ERR_PTR(-ENOMEM); + + vr = kzalloc(sizeof(*vr), GFP_KERNEL); + if (!vr) { + err = -ENOMEM; + goto err_alloc_vr; + } + + vr->tb_id = tb_id; + vr->hw_vr_id = hw_vr_id; + + list_add(&vr->router_node, &sw->router->vr_list); + + return vr; + +err_alloc_vr: + prestera_hw_vr_delete(sw, hw_vr_id); + kfree(vr); + return ERR_PTR(err); +} + +static void __prestera_vr_destroy(struct prestera_switch *sw, + struct prestera_vr *vr) +{ + prestera_hw_vr_delete(sw, vr->hw_vr_id); + list_del(&vr->router_node); + kfree(vr); +} + +static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id, + struct netlink_ext_ack *extack) +{ + struct prestera_vr *vr; + + vr = __prestera_vr_find(sw, tb_id); + if (!vr) + vr = __prestera_vr_create(sw, tb_id, extack); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + return vr; +} + +static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr) +{ + if (!vr->ref_cnt) + __prestera_vr_destroy(sw, vr); +} + +/* iface is overhead struct. vr_id also can be removed. 
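prestera_vr_get()/prestera_vr_put() above implement get-or-create with a hand-rolled ref_cnt; in this driver the callers bump ref_cnt themselves and prestera_vr_put() only frees once the count has already dropped to zero. A more conventional fold of the same idea, with the decrement inside put, shown as a generic sketch:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
        struct list_head node;
        unsigned int ref_cnt;
        u32 key;
};

static struct example_entry *example_get(struct list_head *tbl, u32 key)
{
        struct example_entry *e;

        list_for_each_entry(e, tbl, node)
                if (e->key == key)
                        goto found;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return NULL;
        e->key = key;
        list_add(&e->node, tbl);
found:
        e->ref_cnt++; /* caller now owns one reference */
        return e;
}

static void example_put(struct example_entry *e)
{
        if (--e->ref_cnt)
                return;
        /* last reference: unlink and free */
        list_del(&e->node);
        kfree(e);
}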
*/ +static int +__prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in, + struct prestera_rif_entry_key *out) +{ + memset(out, 0, sizeof(*out)); + + switch (in->iface.type) { + case PRESTERA_IF_PORT_E: + out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num; + out->iface.dev_port.port_num = in->iface.dev_port.port_num; + break; + case PRESTERA_IF_LAG_E: + out->iface.lag_id = in->iface.lag_id; + break; + case PRESTERA_IF_VID_E: + out->iface.vlan_id = in->iface.vlan_id; + break; + default: + pr_err("Unsupported iface type"); + return -EINVAL; + } + + out->iface.type = in->iface.type; + return 0; +} + +struct prestera_rif_entry * +prestera_rif_entry_find(const struct prestera_switch *sw, + const struct prestera_rif_entry_key *k) +{ + struct prestera_rif_entry *rif_entry; + struct prestera_rif_entry_key lk; /* lookup key */ + + if (__prestera_rif_entry_key_copy(k, &lk)) + return NULL; + + list_for_each_entry(rif_entry, &sw->router->rif_entry_list, + router_node) { + if (!memcmp(k, &rif_entry->key, sizeof(*k))) + return rif_entry; + } + + return NULL; +} + +void prestera_rif_entry_destroy(struct prestera_switch *sw, + struct prestera_rif_entry *e) +{ + struct prestera_iface iface; + + list_del(&e->router_node); + + memcpy(&iface, &e->key.iface, sizeof(iface)); + iface.vr_id = e->vr->hw_vr_id; + prestera_hw_rif_delete(sw, e->hw_id, &iface); + + e->vr->ref_cnt--; + prestera_vr_put(sw, e->vr); + kfree(e); +} + +struct prestera_rif_entry * +prestera_rif_entry_create(struct prestera_switch *sw, + struct prestera_rif_entry_key *k, + u32 tb_id, const unsigned char *addr) +{ + int err; + struct prestera_rif_entry *e; + struct prestera_iface iface; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + goto err_kzalloc; + + if (__prestera_rif_entry_key_copy(k, &e->key)) + goto err_key_copy; + + e->vr = prestera_vr_get(sw, tb_id, NULL); + if (IS_ERR(e->vr)) + goto err_vr_get; + + e->vr->ref_cnt++; + memcpy(&e->addr, addr, sizeof(e->addr)); + + /* HW */ + memcpy(&iface, &e->key.iface, sizeof(iface)); + iface.vr_id = e->vr->hw_vr_id; + err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id); + if (err) + goto err_hw_create; + + list_add(&e->router_node, &sw->router->rif_entry_list); + + return e; + +err_hw_create: + e->vr->ref_cnt--; + prestera_vr_put(sw, e->vr); +err_vr_get: +err_key_copy: + kfree(e); +err_kzalloc: + return NULL; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h new file mode 100644 index 000000000000..fed53595f7bb --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. 
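Note why __prestera_rif_entry_key_copy() above exists: it zeroes the whole destination and copies only the union member that is valid for the iface type, so padding and stale union bytes can never defeat a bytewise memcmp() of two logically equal keys. (Also worth noting: prestera_rif_entry_find() computes the normalized lk but then memcmp()s the raw caller key, so callers are effectively relied upon to pass pre-zeroed keys.) The normalization pattern in isolation, with a made-up key type:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_key {
        int type;
        union {
                u16 lag_id;
                u16 vlan_id;
        } u;
};

/* Canonicalize before hashing or memcmp(): zero everything, then copy
 * only the fields meaningful for this key type.
 */
static int example_key_copy(const struct example_key *in,
                            struct example_key *out)
{
        memset(out, 0, sizeof(*out));

        switch (in->type) {
        case 0:
                out->u.lag_id = in->u.lag_id;
                break;
        case 1:
                out->u.vlan_id = in->u.vlan_id;
                break;
        default:
                return -EINVAL;
        }

        out->type = in->type;
        return 0;
}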
*/ + +#ifndef _PRESTERA_ROUTER_HW_H_ +#define _PRESTERA_ROUTER_HW_H_ + +struct prestera_vr { + struct list_head router_node; + unsigned int ref_cnt; + u32 tb_id; /* key (kernel fib table id) */ + u16 hw_vr_id; /* virtual router ID */ + u8 __pad[2]; +}; + +struct prestera_rif_entry { + struct prestera_rif_entry_key { + struct prestera_iface iface; + } key; + struct prestera_vr *vr; + unsigned char addr[ETH_ALEN]; + u16 hw_id; /* rif_id */ + struct list_head router_node; /* ht */ +}; + +struct prestera_rif_entry * +prestera_rif_entry_find(const struct prestera_switch *sw, + const struct prestera_rif_entry_key *k); +void prestera_rif_entry_destroy(struct prestera_switch *sw, + struct prestera_rif_entry *e); +struct prestera_rif_entry * +prestera_rif_entry_create(struct prestera_switch *sw, + struct prestera_rif_entry_key *k, + u32 tb_id, const unsigned char *addr); +int prestera_router_hw_init(struct prestera_switch *sw); + +#endif /* _PRESTERA_ROUTER_HW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c index 3cafca827bb7..845e9d8c8cc7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_span.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c @@ -7,6 +7,7 @@ #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" +#include "prestera_flow.h" #include "prestera_span.h" struct prestera_span_entry { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 3ce6ccd0f539..b4599fe4ca8d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev, br_port = prestera_bridge_port_add(bridge, port->dev); if (IS_ERR(br_port)) { - err = PTR_ERR(br_port); - goto err_brport_create; + prestera_bridge_put(bridge); + return PTR_ERR(br_port); } err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, @@ -519,8 +519,6 @@ err_port_join: switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); err_switchdev_offload: prestera_bridge_port_put(br_port); -err_brport_create: - prestera_bridge_put(bridge); return err; } @@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused, prestera_port_obj_attr_set); break; default: - err = -EOPNOTSUPP; + return NOTIFY_DONE; } return notifier_from_errno(err); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 1d607bc6b59e..52bef50f5a0d 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1388,7 +1388,6 @@ static int pxa168_eth_probe(struct platform_device *pdev) { struct pxa168_eth_private *pep = NULL; struct net_device *dev = NULL; - struct resource *res; struct clk *clk; struct device_node *np; int err; @@ -1419,9 +1418,11 @@ static int pxa168_eth_probe(struct platform_device *pdev) goto err_netdev; } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - BUG_ON(!res); - dev->irq = res->start; + err = platform_get_irq(pdev, 0); + if (err == -EPROBE_DEFER) + goto err_netdev; + BUG_ON(dev->irq < 0); + dev->irq = err; dev->netdev_ops = &pxa168_eth_netdev_ops; dev->watchdog_timeo = 2 * HZ; dev->base_addr = 0; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 0c864e5bf0a6..cf03c67fbf40 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ 
b/drivers/net/ethernet/marvell/skge.c @@ -492,7 +492,9 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) } static void skge_get_ring_param(struct net_device *dev, - struct ethtool_ringparam *p) + struct ethtool_ringparam *p, + struct kernel_ethtool_ringparam *kernel_p, + struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); @@ -504,7 +506,9 @@ static void skge_get_ring_param(struct net_device *dev, } static int skge_set_ring_param(struct net_device *dev, - struct ethtool_ringparam *p) + struct ethtool_ringparam *p, + struct kernel_ethtool_ringparam *kernel_p, + struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); int err = 0; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 28b5b9341145..ea16b1dd6a98 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4149,7 +4149,9 @@ static unsigned long roundup_ring_size(unsigned long pending) } static void sky2_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); @@ -4161,7 +4163,9 @@ static void sky2_get_ringparam(struct net_device *dev, } static int sky2_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); @@ -4266,96 +4270,36 @@ static int sky2_get_eeprom_len(struct net_device *dev) return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } -static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) -{ - unsigned long start = jiffies; - - while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) { - /* Can take up to 10.6 ms for write */ - if (time_after(jiffies, start + HZ/4)) { - dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); - return -ETIMEDOUT; - } - msleep(1); - } - - return 0; -} - -static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data, - u16 offset, size_t length) -{ - int rc = 0; - - while (length > 0) { - u32 val; - - sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); - rc = sky2_vpd_wait(hw, cap, 0); - if (rc) - break; - - val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); - - memcpy(data, &val, min(sizeof(val), length)); - offset += sizeof(u32); - data += sizeof(u32); - length -= sizeof(u32); - } - - return rc; -} - -static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data, - u16 offset, unsigned int length) -{ - unsigned int i; - int rc = 0; - - for (i = 0; i < length; i += sizeof(u32)) { - u32 val = *(u32 *)(data + i); - - sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); - sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); - - rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F); - if (rc) - break; - } - return rc; -} - static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); - int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); - - if (!cap) - return -EINVAL; + int rc; eeprom->magic = SKY2_EEPROM_MAGIC; + rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, + data); + if (rc < 0) + return rc; + + eeprom->len = rc; - return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len); + return 0; } static int sky2_set_eeprom(struct net_device *dev, struct 
ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); - int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); - - if (!cap) - return -EINVAL; + int rc; if (eeprom->magic != SKY2_EEPROM_MAGIC) return -EINVAL; - /* Partial writes not supported */ - if ((eeprom->offset & 3) || (eeprom->len & 3)) - return -EINVAL; + rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, + data); - return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); + return rc < 0 ? rc : 0; } static netdev_features_t sky2_fix_features(struct net_device *dev, diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index c357c193378e..86d356b4388d 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config NET_VENDOR_MEDIATEK bool "MediaTek devices" - depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 + depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST help If you have a Mediatek SoC with ethernet, say Y. @@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK config NET_MEDIATEK_SOC tristate "MediaTek SoC Gigabit Ethernet support" depends on NET_DSA || !NET_DSA + select PINCTRL select PHYLINK select DIMLIB help diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 75d67d1b5f6b..b67b4323cff0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -91,46 +91,96 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth) } dev_err(eth->dev, "mdio: MDIO timeout\n"); - return -1; + return -ETIMEDOUT; } -static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, - u32 phy_register, u32 write_data) +static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, + u32 write_data) { - if (mtk_mdio_busy_wait(eth)) - return -1; + int ret; - write_data &= 0xffff; + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; - mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | - (phy_register << PHY_IAC_REG_SHIFT) | - (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data, - MTK_PHY_IAC); + if (phy_reg & MII_ADDR_C45) { + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_ADDR | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_WRITE | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(write_data), + MTK_PHY_IAC); + } else { + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C22 | + PHY_IAC_CMD_WRITE | + PHY_IAC_REG(phy_reg) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(write_data), + MTK_PHY_IAC); + } - if (mtk_mdio_busy_wait(eth)) - return -1; + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; return 0; } -static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) +static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) { - u32 d; - - if (mtk_mdio_busy_wait(eth)) - return 0xffff; + int ret; - mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | - (phy_reg << PHY_IAC_REG_SHIFT) | - (phy_addr << PHY_IAC_ADDR_SHIFT), - MTK_PHY_IAC); + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; - if (mtk_mdio_busy_wait(eth)) - return 0xffff; + if (phy_reg & MII_ADDR_C45) { + mtk_w32(eth, PHY_IAC_ACCESS | + 
PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_ADDR | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_READ | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr), + MTK_PHY_IAC); + } else { + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C22 | + PHY_IAC_CMD_C22_READ | + PHY_IAC_REG(phy_reg) | + PHY_IAC_ADDR(phy_addr), + MTK_PHY_IAC); + } - d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; - return d; + return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; } static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, @@ -463,94 +513,8 @@ static void mtk_mac_link_up(struct phylink_config *config, mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } -static void mtk_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct mtk_mac *mac = container_of(config, struct mtk_mac, - phylink_config); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_GMII && - !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) && - phy_interface_mode_is_rgmii(state->interface)) && - !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && - !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) && - !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) && - (state->interface == PHY_INTERFACE_MODE_SGMII || - phy_interface_mode_is_8023z(state->interface)))) { - linkmode_zero(supported); - return; - } - - phylink_set_port_modes(mask); - phylink_set(mask, Autoneg); - - switch (state->interface) { - case PHY_INTERFACE_MODE_TRGMII: - phylink_set(mask, 1000baseT_Full); - break; - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - phylink_set(mask, 1000baseX_Full); - phylink_set(mask, 2500baseX_Full); - break; - case PHY_INTERFACE_MODE_GMII: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - phylink_set(mask, 1000baseT_Half); - fallthrough; - case PHY_INTERFACE_MODE_SGMII: - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - fallthrough; - case PHY_INTERFACE_MODE_MII: - case PHY_INTERFACE_MODE_RMII: - case PHY_INTERFACE_MODE_REVMII: - case PHY_INTERFACE_MODE_NA: - default: - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - break; - } - - if (state->interface == PHY_INTERFACE_MODE_NA) { - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - phylink_set(mask, 2500baseX_Full); - } - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseT_Half); - phylink_set(mask, 1000baseX_Full); - } - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseT_Half); - } - } - - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); - - /* We can only operate at 2500BaseX or 1000BaseX. If requested - * to advertise both, only report advertising at 2500BaseX. 
- */ - phylink_helper_basex_speed(state); -} - static const struct phylink_mac_ops mtk_phylink_ops = { - .validate = mtk_validate, + .validate = phylink_generic_validate, .mac_pcs_get_state = mtk_mac_pcs_get_state, .mac_an_restart = mtk_mac_an_restart, .mac_config = mtk_mac_config, @@ -583,6 +547,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) eth->mii_bus->name = "mdio"; eth->mii_bus->read = mtk_mdio_read; eth->mii_bus->write = mtk_mdio_write; + eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45; eth->mii_bus->priv = eth; eth->mii_bus->parent = eth->dev; @@ -2297,7 +2262,6 @@ static int mtk_open(struct net_device *dev) /* we run 2 netdevs on the same dma ring so we only bring it up once */ if (!refcount_read(&eth->dma_refcnt)) { u32 gdm_config = MTK_GDMA_TO_PDMA; - int err; err = mtk_start_dma(eth); if (err) @@ -3009,6 +2973,33 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->phylink_config.dev = &eth->netdev[id]->dev; mac->phylink_config.type = PHYLINK_NETDEV; + /* This driver makes use of state->speed/state->duplex in + * mac_config + */ + mac->phylink_config.legacy_pre_march2020 = true; + mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; + + __set_bit(PHY_INTERFACE_MODE_MII, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + mac->phylink_config.supported_interfaces); + + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) + phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); + + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id) + __set_bit(PHY_INTERFACE_MODE_TRGMII, + mac->phylink_config.supported_interfaces); + + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { + __set_bit(PHY_INTERFACE_MODE_SGMII, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + mac->phylink_config.supported_interfaces); + } phylink = phylink_create(&mac->phylink_config, of_fwnode_handle(mac->of_node), diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 5ef70dd8b49c..c9d42be314b5 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -341,11 +341,20 @@ /* PHY Indirect Access Control registers */ #define MTK_PHY_IAC 0x10004 #define PHY_IAC_ACCESS BIT(31) -#define PHY_IAC_READ BIT(19) -#define PHY_IAC_WRITE BIT(18) -#define PHY_IAC_START BIT(16) -#define PHY_IAC_ADDR_SHIFT 20 -#define PHY_IAC_REG_SHIFT 25 +#define PHY_IAC_REG_MASK GENMASK(29, 25) +#define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x)) +#define PHY_IAC_ADDR_MASK GENMASK(24, 20) +#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x)) +#define PHY_IAC_CMD_MASK GENMASK(19, 18) +#define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0) +#define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1) +#define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2) +#define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3) +#define PHY_IAC_START_MASK GENMASK(17, 16) +#define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0) +#define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1) +#define PHY_IAC_DATA_MASK GENMASK(15, 0) +#define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x)) #define PHY_IAC_TIMEOUT HZ #define MTK_MAC_MISC 0x1000c diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c index 
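The PHY_IAC_* rework just above trades open-coded shifts for GENMASK()/FIELD_PREP(), so each field of the indirect-access word is positioned by its mask in one place. A compile-only demonstration of how a clause-45 address-phase word is assembled this way; the EX_* macros mirror the new defines but are local to the example:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_IAC_ACCESS           BIT(31)
#define EX_IAC_REG_MASK         GENMASK(29, 25)
#define EX_IAC_ADDR_MASK        GENMASK(24, 20)
#define EX_IAC_CMD_MASK         GENMASK(19, 18)
#define EX_IAC_START_MASK       GENMASK(17, 16)
#define EX_IAC_DATA_MASK        GENMASK(15, 0)

/* Build a C45 address-phase command word: FIELD_PREP() shifts each
 * value into its mask position and warns at build time when a constant
 * does not fit the field.
 */
static u32 ex_iac_c45_addr(u32 devad, u32 phy_addr, u32 regad)
{
        return EX_IAC_ACCESS |
               FIELD_PREP(EX_IAC_START_MASK, 0) |       /* C45 start code */
               FIELD_PREP(EX_IAC_CMD_MASK, 0) |         /* address phase */
               FIELD_PREP(EX_IAC_REG_MASK, devad) |
               FIELD_PREP(EX_IAC_ADDR_MASK, phy_addr) |
               FIELD_PREP(EX_IAC_DATA_MASK, regad);
}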
98b1d3577bcd..d4b482340cb9 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -207,9 +207,6 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe) struct dentry *root; root = debugfs_create_dir("mtk_ppe", NULL); - if (!root) - return -ENOMEM; - debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all); debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 066d79e4ecfc..ed5038d98ef6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void) MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000, ETHTOOL_LINK_MODE_1000baseT_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000, - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000, @@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void) MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000, - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000, - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT); MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000, ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); @@ -1141,7 +1141,9 @@ static void mlx4_en_get_pauseparam(struct net_device *dev, } static int mlx4_en_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; @@ -1208,7 +1210,9 @@ out: } static void mlx4_en_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3f6d5c384637..c61dc7ae0c05 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -33,6 +33,7 @@ #include <linux/bpf.h> #include <linux/etherdevice.h> +#include <linux/filter.h> #include <linux/tcp.h> #include <linux/if_vlan.h> #include <linux/delay.h> @@ -2286,9 +2287,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, bool carry_xdp_prog) { struct bpf_prog *xdp_prog; - int i, t; + int i, t, ret; - mlx4_en_copy_priv(tmp, priv, prof); + ret = mlx4_en_copy_priv(tmp, priv, prof); + if (ret) { + en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n", + __func__); + return ret; + } if (mlx4_en_alloc_resources(tmp)) { en_warn(priv, @@ -2422,10 +2428,6 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - /* device doesn't support time 
stamping */ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 650e6a1844ae..8cfc649f226b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -812,7 +812,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud trace_xdp_exception(dev, xdp_prog, act); goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(dev, xdp_prog, act); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 92056452a9e3..4ba1a78c6515 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -115,6 +115,7 @@ config MLX5_TC_CT config MLX5_TC_SAMPLE bool "MLX5 TC sample offload support" depends on MLX5_CLS_ACT + depends on PSAMPLE=y || PSAMPLE=n || MLX5_CORE=m default y help Say Y here if you want to support offloading sample rules via tc diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index e63bb9ceb9c0..fcfd38fa9e6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -46,6 +46,15 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \ en/tc/post_act.o en/tc/int_port.o + +mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \ + en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \ + en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \ + en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \ + en/tc/act/mirred.o en/tc/act/mirred_nic.o \ + en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \ + en/tc/act/redirect_ingress.o + mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o @@ -95,11 +104,12 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o steering/dr_ste.o steering/dr_send.o \ steering/dr_ste_v0.o steering/dr_ste_v1.o \ steering/dr_cmd.o steering/dr_fw.o \ - steering/dr_action.o steering/fs_dr.o + steering/dr_action.o steering/fs_dr.o \ + steering/dr_dbg.o # # SF device # -mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o +mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_affinity.o # # SF manager diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index f71ec4d9d68e..17fe05809653 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -148,8 +148,12 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) if (!refcount_dec_and_test(&ent->refcnt)) return; - if (ent->idx >= 0) - cmd_free_index(ent->cmd, ent->idx); + if (ent->idx >= 0) { + struct mlx5_cmd *cmd = ent->cmd; + + cmd_free_index(cmd, ent->idx); + up(ent->page_queue ? 
&cmd->pages_sem : &cmd->sem); + } cmd_free_ent(ent); } @@ -339,6 +343,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_PAGE_FAULT_RESUME: case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS: case MLX5_CMD_OP_DEALLOC_SF: + case MLX5_CMD_OP_DESTROY_UCTX: + case MLX5_CMD_OP_DESTROY_UMEM: + case MLX5_CMD_OP_MODIFY_RQT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -444,7 +451,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_TIS: case MLX5_CMD_OP_QUERY_TIS: case MLX5_CMD_OP_CREATE_RQT: - case MLX5_CMD_OP_MODIFY_RQT: case MLX5_CMD_OP_QUERY_RQT: case MLX5_CMD_OP_CREATE_FLOW_TABLE: @@ -464,9 +470,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: case MLX5_CMD_OP_CREATE_UCTX: - case MLX5_CMD_OP_DESTROY_UCTX: case MLX5_CMD_OP_CREATE_UMEM: - case MLX5_CMD_OP_DESTROY_UMEM: case MLX5_CMD_OP_ALLOC_MEMIC: case MLX5_CMD_OP_MODIFY_XRQ: case MLX5_CMD_OP_RELEASE_XRQ_ERROR: @@ -900,25 +904,6 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) return cmd->allowed_opcode == opcode; } -static int cmd_alloc_index_retry(struct mlx5_cmd *cmd) -{ - unsigned long alloc_end = jiffies + msecs_to_jiffies(1000); - int idx; - -retry: - idx = cmd_alloc_index(cmd); - if (idx < 0 && time_before(jiffies, alloc_end)) { - /* Index allocation can fail on heavy load of commands. This is a temporary - * situation as the current command already holds the semaphore, meaning that - * another command completion is being handled and it is expected to release - * the entry index soon. - */ - cpu_relax(); - goto retry; - } - return idx; -} - bool mlx5_cmd_is_down(struct mlx5_core_dev *dev) { return pci_channel_offline(dev->pdev) || @@ -946,7 +931,7 @@ static void cmd_work_handler(struct work_struct *work) sem = ent->page_queue ? 
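In the mlx5 cmd.c hunks above, the up() on the pages/commands semaphore moves into cmd_ent_put(), so a command slot and its semaphore ticket are released together, and only when the last reference to the entry drops. That is also why the cmd_alloc_index_retry() busy-loop can be deleted: a winner of down() is now guaranteed a free index. The final-put release pattern in miniature, using real refcount/semaphore APIs around invented slot bookkeeping:

#include <linux/bitops.h>
#include <linux/refcount.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

struct example_pool {
        struct semaphore sem;           /* bounds entries in flight */
        unsigned long free_mask;        /* one bit per slot */
};

struct example_ent {
        refcount_t refcnt;
        int idx;                        /* -1 if no slot was ever taken */
        struct example_pool *pool;
};

/* Final-put teardown: only the last reference returns the slot and
 * up()s the semaphore, so index allocation can never transiently fail
 * for a holder of the semaphore.
 */
static void example_ent_put(struct example_ent *ent)
{
        if (!refcount_dec_and_test(&ent->refcnt))
                return;

        if (ent->idx >= 0) {
                set_bit(ent->idx, &ent->pool->free_mask);
                up(&ent->pool->sem);
        }
        kfree(ent);
}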
&cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { - alloc_ret = cmd_alloc_index_retry(cmd); + alloc_ret = cmd_alloc_index(cmd); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); if (ent->callback) { @@ -1602,8 +1587,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force vector = vec & 0xffffffff; for (i = 0; i < (1 << cmd->log_sz); i++) { if (test_bit(i, &vector)) { - struct semaphore *sem; - ent = cmd->ent_arr[i]; /* if we already completed the command, ignore it */ @@ -1626,10 +1609,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) cmd_ent_put(ent); - if (ent->page_queue) - sem = &cmd->pages_sem; - else - sem = &cmd->sem; ent->ts2 = ktime_get_ns(); memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); dump_command(dev, ent, 0); @@ -1683,7 +1662,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force */ complete(&ent->done); } - up(sem); } } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 02e77ffe5c3e..5371ad0a12eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -164,13 +164,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); MLX5_SET(destroy_cq_in, in, uid, cq->uid); err = mlx5_cmd_exec_in(dev, destroy_cq, in); + if (err) + return err; synchronize_irq(cq->irqn); - mlx5_cq_put(cq); wait_for_completion(&cq->free); - return err; + return 0; } EXPORT_SYMBOL(mlx5_core_destroy_cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 07c8d9811bc8..10d195042ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) if (!mlx5_debugfs_root) return; - if (cq->dbg) + if (cq->dbg) { rem_res_tree(cq->dbg); + cq->dbg = NULL; + } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index a8b84d53dfb0..ba6dad97e308 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -538,7 +538,7 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) return add_drivers(dev); } -static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) +bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) { u64 fsystem_guid, psystem_guid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 1c98652b244a..d1093bb2d436 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -546,6 +546,13 @@ static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 return 0; } +static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + return (val.vu16 >= 64 && val.vu16 <= 4096) ? 
0 : -EINVAL; +} + static const struct devlink_param mlx5_devlink_params[] = { DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING, @@ -570,6 +577,10 @@ static const struct devlink_param mlx5_devlink_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME), mlx5_devlink_enable_remote_dev_reset_get, mlx5_devlink_enable_remote_dev_reset_set, NULL), + DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx5_devlink_eq_depth_validate), + DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx5_devlink_eq_depth_validate), }; static void mlx5_devlink_set_params_init_values(struct devlink *devlink) @@ -608,6 +619,16 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) value); } #endif + + value.vu32 = MLX5_COMP_EQ_SIZE; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE, + value); + + value.vu32 = MLX5_NUM_ASYNC_EQE; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE, + value); } static const struct devlink_param enable_eth_param = @@ -752,6 +773,66 @@ static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink) mlx5_devlink_eth_param_unregister(devlink); } +static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (val.vu32 == 0) { + NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0"); + return -EINVAL; + } + + if (!is_power_of_2(val.vu32)) { + NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs"); + return -EINVAL; + } + + if (ilog2(val.vu32) > + MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) { + NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range"); + return -EINVAL; + } + + return 0; +} + +static const struct devlink_param max_uc_list_param = + DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx5_devlink_max_uc_list_validate); + +static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + int err; + + if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported)) + return 0; + + err = devlink_param_register(devlink, &max_uc_list_param); + if (err) + return err; + + value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + value); + return 0; +} + +static void +mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported)) + return; + + devlink_param_unregister(devlink, &max_uc_list_param); +} + #define MLX5_TRAP_DROP(_id, _group_id) \ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ @@ -811,6 +892,10 @@ int mlx5_devlink_register(struct devlink *devlink) if (err) goto auxdev_reg_err; + err = mlx5_devlink_max_uc_list_param_register(devlink); + if (err) + goto max_uc_list_err; + err = mlx5_devlink_traps_register(devlink); if (err) goto traps_reg_err; @@ -821,6 +906,8 @@ int mlx5_devlink_register(struct devlink *devlink) return 0; traps_reg_err: + mlx5_devlink_max_uc_list_param_unregister(devlink); +max_uc_list_err: 
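The devlink hunks above register driverinit params (EQ sizes, max_macs) with range validators and stage their defaults via devlink_param_driverinit_value_set(). The read-back half of the flow, which these hunks do not show, happens when the driver (re)initializes; a plausible sketch, with the fallback handling being an assumption:

#include <net/devlink.h>

/* Fetch the value devlink has staged for a driverinit param at
 * (re)load time. An error means the param is not registered for this
 * device, so fall back to the compiled-in default.
 */
static u32 example_get_max_macs(struct devlink *devlink, u32 fallback)
{
        union devlink_param_value val;
        int err;

        err = devlink_param_driverinit_value_get(devlink,
                                                 DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
                                                 &val);
        return err ? fallback : val.vu32;
}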
mlx5_devlink_auxdev_params_unregister(devlink); auxdev_reg_err: devlink_params_unregister(devlink, mlx5_devlink_params, @@ -831,6 +918,7 @@ auxdev_reg_err: void mlx5_devlink_unregister(struct devlink *devlink) { mlx5_devlink_traps_unregister(devlink); + mlx5_devlink_max_uc_list_param_unregister(devlink); mlx5_devlink_auxdev_params_unregister(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f0ac6b0d9653..812e6810cb3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -145,7 +145,6 @@ struct page_pool; #define MLX5E_MIN_NUM_CHANNELS 0x1 #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2) -#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_TX_XSK_POLL_BUDGET 64 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ @@ -173,7 +172,7 @@ struct page_pool; #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT) -#define MLX5E_MAX_KLM_PER_WQE(mdev) \ +#define MLX5E_MAX_KLM_PER_WQE \ MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE) #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -783,6 +782,8 @@ struct mlx5e_channel { DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); int ix; int cpu; + /* Sync between icosq recovery and XSK enable/disable. */ + struct mutex icosq_recovery_lock; }; struct mlx5e_ptp; @@ -875,10 +876,8 @@ struct mlx5e_trap; struct mlx5e_priv { /* priv data path fields - start */ - /* +1 for port ptp ts */ - struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC + - MLX5E_QOS_MAX_LEAF_NODES]; - int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; + struct mlx5e_txqsq **txq2sq; + int **channel_tc2realtxq; int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC]; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; @@ -893,7 +892,7 @@ struct mlx5e_priv { struct mlx5e_channels channels; u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC]; struct mlx5e_rx_res *rx_res; - u32 tx_rates[MLX5E_MAX_NUM_SQS]; + u32 *tx_rates; struct mlx5e_flow_steering fs; @@ -909,7 +908,7 @@ struct mlx5e_priv { struct net_device *netdev; struct mlx5e_trap *en_trap; struct mlx5e_stats stats; - struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; + struct mlx5e_channel_stats **channel_stats; struct mlx5e_channel_stats trap_stats; struct mlx5e_ptp_stats ptp_stats; u16 stats_nch; @@ -956,6 +955,12 @@ struct mlx5e_rx_handlers { extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic; +enum mlx5e_profile_feature { + MLX5E_PROFILE_FEATURE_PTP_RX, + MLX5E_PROFILE_FEATURE_PTP_TX, + MLX5E_PROFILE_FEATURE_QOS_HTB, +}; + struct mlx5e_profile { int (*init)(struct mlx5_core_dev *mdev, struct net_device *netdev); @@ -969,14 +974,18 @@ struct mlx5e_profile { int (*update_rx)(struct mlx5e_priv *priv); void (*update_stats)(struct mlx5e_priv *priv); void (*update_carrier)(struct mlx5e_priv *priv); + int (*max_nch_limit)(struct mlx5_core_dev *mdev); unsigned int (*stats_grps_num)(struct mlx5e_priv *priv); mlx5e_stats_grp_t *stats_grps; const struct mlx5e_rx_handlers *rx_handlers; int max_tc; u8 rq_groups; - bool rx_ptp_support; + u32 features; }; +#define mlx5e_profile_feature_cap(profile, feature) \ + ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature)) + void mlx5e_build_ptys2ethtool_map(void); bool 
mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); @@ -1014,9 +1023,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param); void mlx5e_destroy_rq(struct mlx5e_rq *rq); struct mlx5e_sq_param; -int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, - struct mlx5e_sq_param *param, struct mlx5e_icosq *sq); -void mlx5e_close_icosq(struct mlx5e_icosq *sq); int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, struct mlx5e_xdpsq *sq, bool is_redirect); @@ -1057,7 +1063,6 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, mlx5e_fp_preactivate preactivate, void *context, bool reset); int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv); -int mlx5e_num_channels_changed(struct mlx5e_priv *priv); int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context); void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); @@ -1148,9 +1153,12 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, struct ethtool_channels *ch); int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, - struct ethtool_coalesce *coal); + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal); int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, - struct ethtool_coalesce *coal); + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, struct ethtool_link_ksettings *link_ksettings); int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, @@ -1186,8 +1194,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev); void mlx5e_priv_cleanup(struct mlx5e_priv *priv); struct net_device * -mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, - unsigned int txqs, unsigned int rxqs); +mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile); int mlx5e_attach_netdev(struct mlx5e_priv *priv); void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index d5b7110a4265..0107e4e73bb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq); void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq); void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq); +void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c); +void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c); #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c index d290d7276b8d..b4f3bd7d346e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c @@ -20,7 +20,7 @@ mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch, struct mlx5e_channel_stats *stats; int tc; - stats = &priv->channel_stats[ch]; + stats = priv->channel_stats[ch]; data->rx_packets = stats->rq.packets; data->rx_bytes = 
stats->rq.bytes; @@ -120,14 +120,14 @@ static void mlx5e_hv_vhca_stats_cleanup(struct mlx5_hv_vhca_agent *agent) cancel_delayed_work_sync(&priv->stats_agent.work); } -int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) +void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) { int buf_len = mlx5e_hv_vhca_stats_buf_size(priv); struct mlx5_hv_vhca_agent *agent; priv->stats_agent.buf = kvzalloc(buf_len, GFP_KERNEL); if (!priv->stats_agent.buf) - return -ENOMEM; + return; agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca, MLX5_HV_VHCA_AGENT_STATS, @@ -142,13 +142,11 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) PTR_ERR(agent)); kvfree(priv->stats_agent.buf); - return IS_ERR_OR_NULL(agent); + return; } priv->stats_agent.agent = agent; INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work); - - return 0; } void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h index 664463faf77b..29c8c6d3260f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h @@ -7,19 +7,12 @@ #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) -int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv); +void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv); void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv); #else - -static inline int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) -{ - return 0; -} - -static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) -{ -} +static inline void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) {} +static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) {} #endif #endif /* __MLX5_EN_STATS_VHCA_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c index 7edde4d536fd..17325c5d6516 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c @@ -155,3 +155,61 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh) return mh->modify_hdr; } +char * +mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) +{ + int new_num_actions, max_hw_actions; + size_t new_sz, old_sz; + void *ret; + + if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions) + goto out; + + max_hw_actions = mlx5e_mod_hdr_max_actions(mdev, namespace); + new_num_actions = min(max_hw_actions, + mod_hdr_acts->actions ? 
+ mod_hdr_acts->max_actions * 2 : 1); + if (mod_hdr_acts->max_actions == new_num_actions) + return ERR_PTR(-ENOSPC); + + new_sz = MLX5_MH_ACT_SZ * new_num_actions; + old_sz = mod_hdr_acts->max_actions * MLX5_MH_ACT_SZ; + + if (mod_hdr_acts->is_static) { + ret = kzalloc(new_sz, GFP_KERNEL); + if (ret) { + memcpy(ret, mod_hdr_acts->actions, old_sz); + mod_hdr_acts->is_static = false; + } + } else { + ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL); + if (ret) + memset(ret + old_sz, 0, new_sz - old_sz); + } + if (!ret) + return ERR_PTR(-ENOMEM); + + mod_hdr_acts->actions = ret; + mod_hdr_acts->max_actions = new_num_actions; + +out: + return mod_hdr_acts->actions + (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ); +} + +void +mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) +{ + if (!mod_hdr_acts->is_static) + kfree(mod_hdr_acts->actions); + + mod_hdr_acts->actions = NULL; + mod_hdr_acts->num_actions = 0; + mod_hdr_acts->max_actions = 0; +} + +char * +mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos) +{ + return mod_hdr_acts->actions + (pos * MLX5_MH_ACT_SZ); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h index 33b23d8f9182..b8dac418d0a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h @@ -7,14 +7,32 @@ #include <linux/hashtable.h> #include <linux/mlx5/fs.h> +#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) + struct mlx5e_mod_hdr_handle; struct mlx5e_tc_mod_hdr_acts { int num_actions; int max_actions; + bool is_static; void *actions; }; +#define DECLARE_MOD_HDR_ACTS_ACTIONS(name, len) \ + u8 name[len][MLX5_MH_ACT_SZ] = {} + +#define DECLARE_MOD_HDR_ACTS(name, acts_arr) \ + struct mlx5e_tc_mod_hdr_acts name = { \ + .max_actions = ARRAY_SIZE(acts_arr), \ + .is_static = true, \ + .actions = acts_arr, \ + } + +char *mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); +void mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); +char *mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos); + struct mlx5e_mod_hdr_handle * mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev, struct mod_hdr_tbl *tbl, @@ -28,4 +46,12 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh); void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl); void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl); +static inline int mlx5e_mod_hdr_max_actions(struct mlx5_core_dev *mdev, int namespace) +{ + if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ + return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions); + else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */ + return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions); +} + #endif /* __MLX5E_EN_MOD_HDR_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index f8c29022dbb2..66180ffb4606 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -717,7 +717,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev, int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); u32 wqebbs; - max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev); + max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE; max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param); max_num_of_umr_per_wqe = 
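mlx5e_mod_hdr_alloc() above grows the action buffer on demand: storage declared with DECLARE_MOD_HDR_ACTS starts out as a caller-owned static array, which must be copied into a fresh kzalloc() on first growth (it cannot be krealloc'ed or kfree'd), while an already-heap buffer is krealloc'ed with the new tail zeroed. The core of that promotion pattern, distilled into a generic sketch:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_buf {
        void *data;
        size_t cap;             /* bytes currently available */
        bool is_static;         /* data points into caller storage */
};

/* Grow the buffer, promoting caller-provided static storage to the
 * heap on first growth. krealloc() may move the block, so only the
 * returned pointer is kept.
 */
static int example_buf_grow(struct example_buf *b, size_t new_cap)
{
        void *p;

        if (b->is_static) {
                p = kzalloc(new_cap, GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
                memcpy(p, b->data, b->cap);
                b->is_static = false;
        } else {
                p = krealloc(b->data, new_cap, GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
                memset((u8 *)p + b->cap, 0, new_cap - b->cap);
        }

        b->data = p;
        b->cap = new_cap;
        return 0;
}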
max_hd_per_wqe / max_klm_per_umr; rest = max_hd_per_wqe % max_klm_per_umr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 18d542b1c5cb..82baafd3c00c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -768,7 +768,7 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv) { struct mlx5e_ptp_fs *ptp_fs; - if (!priv->profile->rx_ptp_support) + if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) return 0; ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL); @@ -783,7 +783,7 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv) { struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; - if (!priv->profile->rx_ptp_support) + if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) return; mlx5e_ptp_rx_unset_fs(priv); @@ -794,7 +794,7 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set) { struct mlx5e_ptp *c = priv->channels.ptp; - if (!priv->profile->rx_ptp_support) + if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) return 0; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 50977f01a050..00449df98a5e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ +#include <net/sch_generic.h> #include "en.h" #include "params.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index fcb0892c08a9..0991345c4ae5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -517,6 +517,9 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void * void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { + if (!netdev) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_BLOCK: return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h index d6c7c81690eb..7c9dd3a75f8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h @@ -66,7 +66,7 @@ mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, static inline void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, - struct sk_buff *skb) {} + struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); } #endif /* CONFIG_MLX5_CLS_ACT */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 74086eb556ae..2684e9da9f41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -62,6 +62,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq) static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) { + struct mlx5e_rq *xskrq = NULL; struct mlx5_core_dev *mdev; struct mlx5e_icosq *icosq; struct net_device *dev; @@ -70,7 +71,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) int err; icosq = ctx; + + mutex_lock(&icosq->channel->icosq_recovery_lock); + + /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. 
*/ rq = &icosq->channel->rq; + if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state)) + xskrq = &icosq->channel->xskrq; mdev = icosq->channel->mdev; dev = icosq->channel->netdev; err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state); @@ -84,6 +91,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) goto out; mlx5e_deactivate_rq(rq); + if (xskrq) + mlx5e_deactivate_rq(xskrq); + err = mlx5e_wait_for_icosq_flush(icosq); if (err) goto out; @@ -97,15 +107,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) goto out; mlx5e_reset_icosq_cc_pc(icosq); + mlx5e_free_rx_in_progress_descs(rq); + if (xskrq) + mlx5e_free_rx_in_progress_descs(xskrq); + clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); mlx5e_activate_icosq(icosq); - mlx5e_activate_rq(rq); + mlx5e_activate_rq(rq); rq->stats->recover++; + + if (xskrq) { + mlx5e_activate_rq(xskrq); + xskrq->stats->recover++; + } + + mutex_unlock(&icosq->channel->icosq_recovery_lock); + return 0; out: clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); + mutex_unlock(&icosq->channel->icosq_recovery_lock); return err; } @@ -706,6 +729,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq) mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); } +void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c) +{ + mutex_lock(&c->icosq_recovery_lock); +} + +void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c) +{ + mutex_unlock(&c->icosq_recovery_lock); +} + static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = { .name = "rx", .recover = mlx5e_rx_reporter_recover, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 4f4bc8726ec4..60bc5b577ab9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -466,6 +466,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms return mlx5e_health_fmsg_named_obj_nest_end(fmsg); } +static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, + void *ctx) +{ + struct mlx5e_tx_timeout_ctx *to_ctx = ctx; + + return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq); +} + static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) { @@ -561,11 +569,11 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) to_ctx.sq = sq; err_ctx.ctx = &to_ctx; err_ctx.recover = mlx5e_tx_reporter_timeout_recover; - err_ctx.dump = mlx5e_tx_reporter_dump_sq; + err_ctx.dump = mlx5e_tx_reporter_timeout_dump; snprintf(err_str, sizeof(err_str), "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u", sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, - jiffies_to_usecs(jiffies - sq->txq->trans_start)); + jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start))); mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); return to_ctx.status; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index 142953847996..24c32f73040a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -13,6 +13,9 @@ struct mlx5e_rx_res { unsigned int max_nch; u32 drop_rqn; + struct mlx5e_packet_merge_param pkt_merge_param; + struct rw_semaphore pkt_merge_param_sem; + struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; bool rss_active; 
u32 rss_rqns[MLX5E_INDIR_RQT_SIZE]; @@ -34,7 +37,6 @@ struct mlx5e_rx_res { /* API for rx_res_rss_* */ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, - const struct mlx5e_packet_merge_param *init_pkt_merge_param, unsigned int init_nch) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; @@ -49,7 +51,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, return -ENOMEM; err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn, - init_pkt_merge_param); + &res->pkt_merge_param); if (err) goto err_rss_free; @@ -274,8 +276,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void) return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL); } -static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, - const struct mlx5e_packet_merge_param *init_pkt_merge_param) +static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; struct mlx5e_tir_builder *builder; @@ -306,7 +307,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), inner_ft_support); - mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param); + mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); mlx5e_tir_builder_build_direct(builder); err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true); @@ -336,7 +337,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), inner_ft_support); - mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param); + mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); mlx5e_tir_builder_build_direct(builder); err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true); @@ -392,6 +393,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res) if (err) goto out; + /* Separated from the channels RQs, does not share pkt_merge state with them */ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, mlx5e_rqt_get_rqtn(&res->ptp.rqt), inner_ft_support); @@ -447,11 +449,14 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, res->max_nch = max_nch; res->drop_rqn = drop_rqn; - err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch); + res->pkt_merge_param = *init_pkt_merge_param; + init_rwsem(&res->pkt_merge_param_sem); + + err = mlx5e_rx_res_rss_init_def(res, init_nch); if (err) goto err_out; - err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param); + err = mlx5e_rx_res_channels_init(res); if (err) goto err_rss_destroy; @@ -513,7 +518,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res) return mlx5e_tir_get_tirn(&res->ptp.tir); } -u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) +static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) { return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt); } @@ -656,6 +661,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, if (!builder) return -ENOMEM; + down_write(&res->pkt_merge_param_sem); + res->pkt_merge_param = *pkt_merge_param; + mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param); final_err = 0; @@ -681,6 +689,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res, } } + up_write(&res->pkt_merge_param_sem); 
mlx5e_tir_builder_free(builder); return final_err; } @@ -689,3 +698,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res * { return mlx5e_rss_get_hash(res->rss[0]); } + +int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq, + struct mlx5e_tir *tir) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_tir_builder *builder; + u32 rqtn; + int err; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq); + + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn, + inner_ft_support); + mlx5e_tir_builder_build_direct(builder); + mlx5e_tir_builder_build_tls(builder); + down_read(&res->pkt_merge_param_sem); + mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); + err = mlx5e_tir_init(tir, builder, res->mdev, false); + up_read(&res->pkt_merge_param_sem); + + mlx5e_tir_builder_free(builder); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index d09f7d174a51..b39b20a720e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); -/* RQTN getters for modules that create their own TIRs */ -u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix); - /* Activate/deactivate API */ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs); void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res); @@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx); /* Workaround for hairpin */ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res); +/* Accel TIRs */ +int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq, + struct mlx5e_tir *tir); #endif /* __MLX5_EN_RX_RES_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c new file mode 100644 index 000000000000..b0de6b999675 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
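+ /* Editor's note, not part of the upstream commit: each tc/act/*.c file in this series fills a struct mlx5e_tc_act ops table (can_offload/parse_action plus an optional post_parse, see act.h below). A rough sketch of how the core parser is expected to drive these ops, with names assumed from act.c in this series: + * + * tc_act = mlx5e_tc_act_get(act->id, ns_type); + * if (!tc_act || !tc_act->can_offload(parse_state, act, i)) + * return -EOPNOTSUPP; + * err = tc_act->parse_action(parse_state, act, priv, attr); + */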
+ +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_accept = { + .can_offload = tc_act_can_offload_accept, + .parse_action = tc_act_parse_accept, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c new file mode 100644 index 000000000000..e600924e30ea --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" +#include "mlx5_core.h" + +/* Must be aligned with enum flow_action_id. */ +static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = { + &mlx5e_tc_act_accept, + &mlx5e_tc_act_drop, + &mlx5e_tc_act_trap, + &mlx5e_tc_act_goto, + &mlx5e_tc_act_mirred, + &mlx5e_tc_act_mirred, + &mlx5e_tc_act_redirect_ingress, + NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ + &mlx5e_tc_act_vlan, + &mlx5e_tc_act_vlan, + &mlx5e_tc_act_vlan_mangle, + &mlx5e_tc_act_tun_encap, + &mlx5e_tc_act_tun_decap, + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_csum, + NULL, /* FLOW_ACTION_MARK, */ + &mlx5e_tc_act_ptype, + NULL, /* FLOW_ACTION_PRIORITY, */ + NULL, /* FLOW_ACTION_WAKE, */ + NULL, /* FLOW_ACTION_QUEUE, */ + &mlx5e_tc_act_sample, + NULL, /* FLOW_ACTION_POLICE, */ + &mlx5e_tc_act_ct, + NULL, /* FLOW_ACTION_CT_METADATA, */ + &mlx5e_tc_act_mpls_push, + &mlx5e_tc_act_mpls_pop, +}; + +/* Must be aligned with enum flow_action_id. */ +static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = { + &mlx5e_tc_act_accept, + &mlx5e_tc_act_drop, + NULL, /* FLOW_ACTION_TRAP, */ + &mlx5e_tc_act_goto, + &mlx5e_tc_act_mirred_nic, + NULL, /* FLOW_ACTION_MIRRED, */ + NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */ + NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ + NULL, /* FLOW_ACTION_VLAN_PUSH, */ + NULL, /* FLOW_ACTION_VLAN_POP, */ + NULL, /* FLOW_ACTION_VLAN_MANGLE, */ + NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */ + NULL, /* FLOW_ACTION_TUNNEL_DECAP, */ + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_pedit, + &mlx5e_tc_act_csum, + &mlx5e_tc_act_mark, + NULL, /* FLOW_ACTION_PTYPE, */ + NULL, /* FLOW_ACTION_PRIORITY, */ + NULL, /* FLOW_ACTION_WAKE, */ + NULL, /* FLOW_ACTION_QUEUE, */ + NULL, /* FLOW_ACTION_SAMPLE, */ + NULL, /* FLOW_ACTION_POLICE, */ + &mlx5e_tc_act_ct, +}; + +/** + * mlx5e_tc_act_get() - Get an action parser for an action id. + * @act_id: Flow action id. + * @ns_type: flow namespace type. + */ +struct mlx5e_tc_act * +mlx5e_tc_act_get(enum flow_action_id act_id, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5e_tc_act **tc_acts; + + tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic; + + return tc_acts[act_id]; +} + +/** + * mlx5e_tc_act_init_parse_state() - Init a new parse_state. + * @parse_state: Parsing state. + * @flow: mlx5e tc flow being handled. + * @flow_action: flow action to parse. + * @extack: to set an error msg. 
+ * + * The same parse_state should be passed to action parsers + * for tracking the current parsing state. + */ +void +mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_tc_flow *flow, + struct flow_action *flow_action, + struct netlink_ext_ack *extack) +{ + memset(parse_state, 0, sizeof(*parse_state)); + parse_state->flow = flow; + parse_state->num_actions = flow_action->num_entries; + parse_state->extack = extack; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h new file mode 100644 index 000000000000..26efa33de56f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_EN_TC_ACT_H__ +#define __MLX5_EN_TC_ACT_H__ + +#include <net/tc_act/tc_pedit.h> +#include <net/flow_offload.h> +#include <linux/netlink.h> +#include "eswitch.h" +#include "pedit.h" + +struct mlx5_flow_attr; + +struct mlx5e_tc_act_parse_state { + unsigned int num_actions; + struct mlx5e_tc_flow *flow; + struct netlink_ext_ack *extack; + bool encap; + bool decap; + bool mpls_push; + bool ptype_host; + const struct ip_tunnel_info *tun_info; + struct pedit_headers_action hdrs[__PEDIT_CMD_MAX]; + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; + int if_count; + struct mlx5_tc_ct_priv *ct_priv; +}; + +struct mlx5e_tc_act { + bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index); + + int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr); + + int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr); +}; + +extern struct mlx5e_tc_act mlx5e_tc_act_drop; +extern struct mlx5e_tc_act mlx5e_tc_act_trap; +extern struct mlx5e_tc_act mlx5e_tc_act_accept; +extern struct mlx5e_tc_act mlx5e_tc_act_mark; +extern struct mlx5e_tc_act mlx5e_tc_act_goto; +extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap; +extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap; +extern struct mlx5e_tc_act mlx5e_tc_act_csum; +extern struct mlx5e_tc_act mlx5e_tc_act_pedit; +extern struct mlx5e_tc_act mlx5e_tc_act_vlan; +extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle; +extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push; +extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop; +extern struct mlx5e_tc_act mlx5e_tc_act_mirred; +extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic; +extern struct mlx5e_tc_act mlx5e_tc_act_ct; +extern struct mlx5e_tc_act mlx5e_tc_act_sample; +extern struct mlx5e_tc_act mlx5e_tc_act_ptype; +extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress; + +struct mlx5e_tc_act * +mlx5e_tc_act_get(enum flow_action_id act_id, + enum mlx5_flow_namespace_type ns_type); + +void +mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_tc_flow *flow, + struct flow_action *flow_action, + struct netlink_ext_ack *extack); + +#endif /* __MLX5_EN_TC_ACT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c new file mode 100644 index 000000000000..29920ef0180a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// 
Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include <linux/tc_act/tc_csum.h> +#include "act.h" +#include "en/tc_priv.h" + +static bool +csum_offload_supported(struct mlx5e_priv *priv, + u32 action, + u32 update_flags, + struct netlink_ext_ack *extack) +{ + u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | + TCA_CSUM_UPDATE_FLAG_UDP; + + /* The HW recalcs checksums only if re-writing headers */ + if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { + NL_SET_ERR_MSG_MOD(extack, + "TC csum action is only offloaded with pedit"); + netdev_warn(priv->netdev, + "TC csum action is only offloaded with pedit\n"); + return false; + } + + if (update_flags & ~prot_flags) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload TC csum action for some header/s"); + netdev_warn(priv->netdev, + "can't offload TC csum action for some header/s - flags %#x\n", + update_flags); + return false; + } + + return true; +} + +static bool +tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct mlx5e_tc_flow *flow = parse_state->flow; + + return csum_offload_supported(flow->priv, flow->attr->action, + act->csum_flags, parse_state->extack); +} + +static int +tc_act_parse_csum(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_csum = { + .can_offload = tc_act_can_offload_csum, + .parse_action = tc_act_parse_csum, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c new file mode 100644 index 000000000000..06ec30cdb269 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" +#include "en/tc_ct.h" + +static bool +tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (flow_flag_test(parse_state->flow, SAMPLE)) { + NL_SET_ERR_MSG_MOD(extack, + "Sample action with connection tracking is not supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + int err; + + err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr, + &attr->parse_attr->mod_hdr_acts, + act, parse_state->extack); + if (err) + return err; + + flow_flag_set(parse_state->flow, CT); + + if (mlx5e_is_eswitch_flow(parse_state->flow)) + attr->esw_attr->split_count = attr->esw_attr->out_count; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_ct = { + .can_offload = tc_act_can_offload_ct, + .parse_action = tc_act_parse_ct, +}; + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c new file mode 100644 index 000000000000..2e29a23bed12 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
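+ /* Editor's note, not part of the upstream commit: drop is the simplest action module; parse_action below only ORs MLX5_FLOW_CONTEXT_ACTION_DROP | COUNT into the flow attribute. A hypothetical rule that would take this path (device name and address are illustrative): + * + * tc filter add dev eth0 ingress protocol ip flower dst_ip 10.0.0.1 action drop + */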
+ +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_drop = { + .can_offload = tc_act_can_offload_drop, + .parse_action = tc_act_parse_drop, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c new file mode 100644 index 000000000000..f44515061228 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" +#include "eswitch.h" + +static int +validate_goto_chain(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + bool is_esw = mlx5e_is_eswitch_flow(flow); + bool ft_flow = mlx5e_is_ft_flow(flow); + u32 dest_chain = act->chain_index; + struct mlx5_fs_chains *chains; + struct mlx5_eswitch *esw; + u32 reformat_and_fwd; + u32 max_chain; + + esw = priv->mdev->priv.eswitch; + chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv); + max_chain = mlx5_chains_get_chain_range(chains); + reformat_and_fwd = is_esw ? + MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) : + MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table); + + if (ft_flow) { + NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); + return -EOPNOTSUPP; + } + + if (!mlx5_chains_backwards_supported(chains) && + dest_chain <= flow->attr->chain) { + NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported"); + return -EOPNOTSUPP; + } + + if (dest_chain > max_chain) { + NL_SET_ERR_MSG_MOD(extack, + "Requested destination chain is out of supported range"); + return -EOPNOTSUPP; + } + + if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | + MLX5_FLOW_CONTEXT_ACTION_DECAP) && + !reformat_and_fwd) { + NL_SET_ERR_MSG_MOD(extack, + "Goto chain is not allowed if action has reformat or decap"); + return -EOPNOTSUPP; + } + + return 0; +} + +static bool +tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + + if (validate_goto_chain(flow->priv, flow, act, extack)) + return false; + + return true; +} + +static int +tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->dest_chain = act->chain_index; + + return 0; +} + +static int +tc_act_post_parse_goto(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + + if (!attr->dest_chain) + return 0; + + if 
(parse_state->decap) { + /* This could be supported by creating a mapping for + * the tunnel device only (without the tunnel), and setting + * this tunnel id on the decap flow. + * + * On restore (miss), we'll just set this saved tunnel + * device. + */ + + NL_SET_ERR_MSG_MOD(extack, "Decap with goto isn't supported"); + netdev_warn(priv->netdev, "Decap with goto isn't supported\n"); + return -EOPNOTSUPP; + } + + if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) { + NL_SET_ERR_MSG_MOD(extack, "Mirroring goto chain rules isn't supported"); + return -EOPNOTSUPP; + } + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_goto = { + .can_offload = tc_act_can_offload_goto, + .parse_action = tc_act_parse_goto, + .post_parse = tc_act_post_parse_goto, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c new file mode 100644 index 000000000000..d775c3d9edf3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en_tc.h" + +static bool +tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) { + NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bits supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_mark(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->nic_attr->flow_tag = act->mark; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mark = { + .can_offload = tc_act_can_offload_mark, + .parse_action = tc_act_parse_mark, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c new file mode 100644 index 000000000000..c614fc7fdc9c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include <linux/if_macvlan.h> +#include <linux/if_vlan.h> +#include <net/bareudp.h> +#include <net/bonding.h> +#include "act.h" +#include "vlan.h" +#include "en/tc_tun_encap.h" +#include "en/tc_priv.h" +#include "en_rep.h" + +static bool +same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev) +{ + return mlx5e_eswitch_vf_rep(priv->netdev) && + priv->netdev == out_dev; +} + +static int +verify_uplink_forwarding(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct net_device *out_dev, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *rep_priv; + + /* Forwarding non-encapsulated traffic between + * uplink ports is allowed only if the + * termination_table_raw_traffic cap is set. + * + * The input vport was stored in attr->in_rep. + * In the LAG case, *priv* is the private data of the + * uplink, which may not be the input vport. 
+ */ + rep_priv = mlx5e_rep_to_rep_priv(attr->esw_attr->in_rep); + + if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) && + mlx5e_eswitch_uplink_rep(out_dev))) + return 0; + + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, + termination_table_raw_traffic)) { + NL_SET_ERR_MSG_MOD(extack, + "devices are both uplink, can't offload forwarding"); + return -EOPNOTSUPP; + } else if (out_dev != rep_priv->netdev) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not the same uplink, can't offload forwarding"); + return -EOPNOTSUPP; + } + return 0; +} + +static bool +is_duplicated_output_device(struct net_device *dev, + struct net_device *out_dev, + int *ifindexes, int if_count, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < if_count; i++) { + if (ifindexes[i] == out_dev->ifindex) { + NL_SET_ERR_MSG_MOD(extack, "can't duplicate output to same device"); + netdev_err(dev, "can't duplicate output to same device: %s\n", + out_dev->name); + return true; + } + } + + return false; +} + +static struct net_device * +get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev) +{ + struct net_device *fdb_out_dev = out_dev; + struct net_device *uplink_upper; + + rcu_read_lock(); + uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); + if (uplink_upper && netif_is_lag_master(uplink_upper) && + uplink_upper == out_dev) { + fdb_out_dev = uplink_dev; + } else if (netif_is_lag_master(out_dev)) { + fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev)); + if (fdb_out_dev && + (!mlx5e_eswitch_rep(fdb_out_dev) || + !netdev_port_same_parent_id(fdb_out_dev, uplink_dev))) + fdb_out_dev = NULL; + } + rcu_read_unlock(); + return fdb_out_dev; +} + +static bool +tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct net_device *out_dev = act->dev; + struct mlx5e_priv *priv = flow->priv; + struct mlx5_esw_flow_attr *esw_attr; + + parse_attr = flow->attr->parse_attr; + esw_attr = flow->attr->esw_attr; + + if (!out_dev) { + /* out_dev is NULL when filters with + * non-existing mirred device are replayed to + * the driver. + */ + return false; + } + + if (parse_state->mpls_push && !netif_is_bareudp(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, "mpls is supported only through a bareudp device"); + return false; + } + + if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) { + /* Ignore forward to self rules generated + * by adding both mlx5 devs to the flow table + * block on a normal nft offload setup. + */ + return false; + } + + if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + NL_SET_ERR_MSG_MOD(extack, + "can't support more output ports, can't offload forwarding"); + netdev_warn(priv->netdev, + "can't support more than %d output ports, can't offload forwarding\n", + esw_attr->out_count); + return false; + } + + if (parse_state->encap || + netdev_port_same_parent_id(priv->netdev, out_dev) || + netif_is_ovs_master(out_dev)) + return true; + + if (parse_attr->filter_dev != priv->netdev) { + /* All mlx5 devices are called to configure + * high level device filters. 
Therefore, the + * *attempt* to install a filter on invalid + * eswitch should not trigger an explicit error + */ + return false; + } + + NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding"); + + return false; +} + +static int +parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *out_dev = act->dev; + + parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex; + parse_attr->tun_info[esw_attr->out_count] = + mlx5e_dup_tun_info(parse_state->tun_info); + + if (!parse_attr->tun_info[esw_attr->out_count]) + return -ENOMEM; + + parse_state->encap = false; + esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; + esw_attr->out_count++; + /* attr->dests[].rep is resolved when we handle encap */ + + return 0; +} + +static int +parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct net_device *out_dev = act->dev; + struct net_device *uplink_dev; + struct mlx5e_priv *out_priv; + struct mlx5_eswitch *esw; + int *ifindexes; + int if_count; + int err; + + esw = priv->mdev->priv.eswitch; + uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + ifindexes = parse_state->ifindexes; + if_count = parse_state->if_count; + + if (is_duplicated_output_device(priv->netdev, out_dev, ifindexes, if_count, extack)) + return -EOPNOTSUPP; + + parse_state->ifindexes[if_count] = out_dev->ifindex; + parse_state->if_count++; + + out_dev = get_fdb_out_dev(uplink_dev, out_dev); + if (!out_dev) + return -ENODEV; + + if (is_vlan_dev(out_dev)) { + err = mlx5e_tc_act_vlan_add_push_action(priv, attr, &out_dev, extack); + if (err) + return err; + } + + if (is_vlan_dev(parse_attr->filter_dev)) { + err = mlx5e_tc_act_vlan_add_pop_action(priv, attr, extack); + if (err) + return err; + } + + if (netif_is_macvlan(out_dev)) + out_dev = macvlan_dev_real_dev(out_dev); + + err = verify_uplink_forwarding(priv, attr, out_dev, extack); + if (err) + return err; + + if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); + return -EOPNOTSUPP; + } + + if (same_vf_reps(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, "can't forward from a VF to itself"); + return -EOPNOTSUPP; + } + + out_priv = netdev_priv(out_dev); + rpriv = out_priv->ppriv; + esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; + esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; + esw_attr->out_count++; + + return 0; +} + +static int +parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *out_dev = act->dev; + int err; + + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, + MLX5E_TC_INT_PORT_EGRESS, + &attr->action, esw_attr->out_count); + if (err) + return err; + + esw_attr->out_count++; + return 0; +} + +static int +tc_act_parse_mirred(struct 
mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct net_device *out_dev = act->dev; + int err = -EOPNOTSUPP; + + if (parse_state->encap) + err = parse_mirred_encap(parse_state, act, attr); + else if (netdev_port_same_parent_id(priv->netdev, out_dev)) + err = parse_mirred(parse_state, act, priv, attr); + else if (netif_is_ovs_master(out_dev)) + err = parse_mirred_ovs_master(parse_state, act, priv, attr); + + if (err) + return err; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mirred = { + .can_offload = tc_act_can_offload_mirred, + .parse_action = tc_act_parse_mirred, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c new file mode 100644 index 000000000000..2c74567b6d25 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct net_device *out_dev = act->dev; + struct mlx5e_priv *priv = flow->priv; + + if (act->id != FLOW_ACTION_REDIRECT) + return false; + + if (priv->netdev->netdev_ops != out_dev->netdev_ops || + !mlx5e_same_hw_devs(priv, netdev_priv(out_dev))) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); + netdev_warn(priv->netdev, + "devices %s %s not on same switch HW, can't offload forwarding\n", + netdev_name(priv->netdev), + out_dev->name); + return false; + } + + return true; +} + +static int +tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex; + flow_flag_set(parse_state->flow, HAIRPIN); + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = { + .can_offload = tc_act_can_offload_mirred_nic, + .parse_action = tc_act_parse_mirred_nic, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c new file mode 100644 index 000000000000..784fc4f68b1e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
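+ /* Editor's note, not part of the upstream commit: as the checks below show, mpls push offload requires the reformat_l2_to_l3_tunnel FDB cap and ETH_P_MPLS_UC, while mpls pop must be the first action and the filter device must be bareudp. A hypothetical rule (device names and label are illustrative): + * + * tc filter add dev bareudp0 ingress flower mpls_label 100 action mpls pop protocol ip action mirred egress redirect dev pf0vf1 + */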
+ +#include <net/bareudp.h> +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_priv *priv = parse_state->flow->priv; + + if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l2_to_l3_tunnel) || + act->mpls_push.proto != htons(ETH_P_MPLS_UC)) { + NL_SET_ERR_MSG_MOD(extack, "mpls push is supported only for mpls_uc protocol"); + return false; + } + + return true; +} + +static int +tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + parse_state->mpls_push = true; + + return 0; +} + +static bool +tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct net_device *filter_dev; + + filter_dev = flow->attr->parse_attr->filter_dev; + + /* we only support mpls pop if it is the first action + * and the filter net device is bareudp. Subsequent + * actions can be pedit and the last can be mirred + * egress redirect. + */ + if (act_index) { + NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action"); + return false; + } + + if (!netif_is_bareudp(filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices"); + return false; + } + + return true; +} + +static int +tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->parse_attr->eth.h_proto = act->mpls_pop.proto; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_flag_set(parse_state->flow, L3_TO_L2_DECAP); + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_mpls_push = { + .can_offload = tc_act_can_offload_mpls_push, + .parse_action = tc_act_parse_mpls_push, +}; + +struct mlx5e_tc_act mlx5e_tc_act_mpls_pop = { + .can_offload = tc_act_can_offload_mpls_pop, + .parse_action = tc_act_parse_mpls_pop, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c new file mode 100644 index 000000000000..79addbbef087 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
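+ /* Editor's note, not part of the upstream commit: mangle offsets/masks/values are accumulated per header type into the hdrs[] masks/vals shadow structs; set_pedit_val() below refuses to set a mask bit that is already set, so e.g. two pedit keys rewriting the same IPv4 ttl field in one flow would fail with -EOPNOTSUPP. + */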
+ +#include <linux/if_vlan.h> +#include "act.h" +#include "pedit.h" +#include "en/tc_priv.h" +#include "en/mod_hdr.h" + +static int pedit_header_offsets[] = { + [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), + [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), + [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), + [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), + [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), +}; + +#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) + +static int +set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) +{ + u32 *curr_pmask, *curr_pval; + + curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); + curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); + + if (*curr_pmask & mask) { /* disallow acting twice on the same location */ + NL_SET_ERR_MSG_MOD(extack, + "curr_pmask and new mask same. Acting twice on same location"); + goto out_err; + } + + *curr_pmask |= mask; + *curr_pval |= (val & mask); + + return 0; + +out_err: + return -EOPNOTSUPP; +} + +static int +parse_pedit_to_modify_hdr(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) +{ + u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1; + u8 htype = act->mangle.htype; + int err = -EOPNOTSUPP; + u32 mask, val, offset; + + if (htype == FLOW_ACT_MANGLE_UNSPEC) { + NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded"); + goto out_err; + } + + if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) { + NL_SET_ERR_MSG_MOD(extack, "The pedit offload action is not supported"); + goto out_err; + } + + mask = act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; + + err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack); + if (err) + goto out_err; + + hdrs[cmd].pedits++; + + return 0; +out_err: + return err; +} + +static int +parse_pedit_to_reformat(const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) +{ + u32 mask, val, offset; + u32 *p; + + if (act->id != FLOW_ACTION_MANGLE) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action id"); + return -EOPNOTSUPP; + } + + if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) { + NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported"); + return -EOPNOTSUPP; + } + + mask = ~act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; + p = (u32 *)&parse_attr->eth; + *(p + (offset >> 2)) |= (val & mask); + + return 0; +} + +int +mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) + return parse_pedit_to_reformat(act, parse_attr, extack); + + return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack); +} + +static bool +tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct 
mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5e_tc_flow *flow = parse_state->flow; + enum mlx5_flow_namespace_type ns_type; + int err; + + ns_type = mlx5e_get_flow_namespace(flow); + + err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type, + attr->parse_attr, parse_state->hdrs, + flow, parse_state->extack); + if (err) + return err; + + if (flow_flag_test(flow, L3_TO_L2_DECAP)) + goto out; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) + esw_attr->split_count = esw_attr->out_count; + +out: + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_pedit = { + .can_offload = tc_act_can_offload_pedit, + .parse_action = tc_act_parse_pedit, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h new file mode 100644 index 000000000000..da8ab03af58f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_EN_TC_ACT_PEDIT_H__ +#define __MLX5_EN_TC_ACT_PEDIT_H__ + +#include "en_tc.h" + +struct pedit_headers { + struct ethhdr eth; + struct vlan_hdr vlan; + struct iphdr ip4; + struct ipv6hdr ip6; + struct tcphdr tcp; + struct udphdr udp; +}; + +struct pedit_headers_action { + struct pedit_headers vals; + struct pedit_headers masks; + u32 pedits; +}; + +int +mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); + +#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c new file mode 100644 index 000000000000..0819110193dc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (act->ptype != PACKET_HOST) { + NL_SET_ERR_MSG_MOD(extack, "skbedit ptype is only supported with type host"); + return -EOPNOTSUPP; + } + + parse_state->ptype_host = true; + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_ptype = { + .can_offload = tc_act_can_offload_ptype, + .parse_action = tc_act_parse_ptype, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c new file mode 100644 index 000000000000..1c32e24e528d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
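+ /* Editor's note, not part of the upstream commit: per the checks below, redirect to ingress is offloaded only toward an OVS internal port (netif_is_ovs_master()), only as the sole destination, and only after a preceding skbedit ptype host action has set parse_state->ptype_host. + */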
+ +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct net_device *out_dev = act->dev; + struct mlx5_esw_flow_attr *esw_attr; + + parse_attr = flow->attr->parse_attr; + esw_attr = flow->attr->esw_attr; + + if (!out_dev) + return false; + + if (!netif_is_ovs_master(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to ingress is supported only for OVS internal ports"); + return false; + } + + if (netif_is_ovs_master(parse_attr->filter_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to ingress is not supported from internal port"); + return false; + } + + if (!parse_state->ptype_host) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to int port ingress requires ptype=host action"); + return false; + } + + if (esw_attr->out_count) { + NL_SET_ERR_MSG_MOD(extack, + "redirect to int port ingress is supported only as single destination"); + return false; + } + + return true; +} + +static int +tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *out_dev = act->dev; + int err; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, + MLX5E_TC_INT_PORT_INGRESS, + &attr->action, esw_attr->out_count); + if (err) + return err; + + esw_attr->out_count++; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress = { + .can_offload = tc_act_can_offload_redirect_ingress, + .parse_action = tc_act_parse_redirect_ingress, +}; + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c new file mode 100644 index 000000000000..6699bdf5cf01 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
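+ /* Editor's note, not part of the upstream commit: sample and CT are mutually exclusive here; this module rejects flows with the CT flag and ct.c rejects flows with the SAMPLE flag. A hypothetical rule (rate/group/trunc values and device names are illustrative): + * + * tc filter add dev pf0vf1 ingress flower action sample rate 100 group 5 trunc 128 pipe action mirred egress redirect dev pf0vf2 + */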
+ +#include <net/psample.h> +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (flow_flag_test(parse_state->flow, CT)) { + NL_SET_ERR_MSG_MOD(extack, + "Sample action with connection tracking is not supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_sample_attr *sample_attr; + + sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL); + if (!sample_attr) + return -ENOMEM; + + sample_attr->rate = act->sample.rate; + sample_attr->group_num = act->sample.psample_group->group_num; + + if (act->sample.truncate) + sample_attr->trunc_size = act->sample.trunc_size; + + attr->sample_attr = sample_attr; + flow_flag_set(parse_state->flow, SAMPLE); + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_sample = { + .can_offload = tc_act_can_offload_sample, + .parse_action = tc_act_parse_sample, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c new file mode 100644 index 000000000000..046b64c2cec4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include "act.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + struct netlink_ext_ack *extack = parse_state->extack; + + if (parse_state->num_actions != 1) { + NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only"); + return false; + } + + return true; +} + +static int +tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_trap = { + .can_offload = tc_act_can_offload_trap, + .parse_action = tc_act_parse_trap, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c new file mode 100644 index 000000000000..6f4a2cf46afd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
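+ /* Editor's note, not part of the upstream commit: tunnel encap/decap only record state here; parse_tun_encap() saves act->tunnel in parse_state->tun_info and sets parse_state->encap, which parse_mirred_encap() in mirred.c consumes, while parse_tun_decap() sets parse_state->decap for later validation (e.g. in the goto post_parse above). + */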
+ +#include "act.h" +#include "en/tc_tun_encap.h" +#include "en/tc_priv.h" + +static bool +tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + if (!act->tunnel) { + NL_SET_ERR_MSG_MOD(parse_state->extack, + "Zero tunnel attributes is not supported"); + return false; + } + + return true; +} + +static int +tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + parse_state->tun_info = act->tunnel; + parse_state->encap = true; + + return 0; +} + +static bool +tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + parse_state->decap = true; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_tun_encap = { + .can_offload = tc_act_can_offload_tun_encap, + .parse_action = tc_act_parse_tun_encap, +}; + +struct mlx5e_tc_act mlx5e_tc_act_tun_decap = { + .can_offload = tc_act_can_offload_tun_decap, + .parse_action = tc_act_parse_tun_decap, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c new file mode 100644 index 000000000000..70fc0c2d8813 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include <linux/if_vlan.h> +#include "act.h" +#include "vlan.h" +#include "en/tc_priv.h" + +static int +add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack) +{ + const struct flow_action_entry prio_tag_act = { + .vlan.vid = 0, + .vlan.prio = + MLX5_GET(fte_match_set_lyr_2_4, + mlx5e_get_match_headers_value(*action, + &parse_attr->spec), + first_prio) & + MLX5_GET(fte_match_set_lyr_2_4, + mlx5e_get_match_headers_criteria(*action, + &parse_attr->spec), + first_prio), + }; + + return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, + &prio_tag_act, parse_attr, hdrs, action, + extack); +} + +static int +parse_tc_vlan_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, + struct mlx5_esw_flow_attr *attr, + u32 *action, + struct netlink_ext_ack *extack) +{ + u8 vlan_idx = attr->total_vlan; + + if (vlan_idx >= MLX5_FS_VLAN_DEPTH) { + NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported"); + return -EOPNOTSUPP; + } + + switch (act->id) { + case FLOW_ACTION_VLAN_POP: + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + MLX5_FS_VLAN_DEPTH)) { + NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported"); + return -EOPNOTSUPP; + } + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; + } else { + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + } + break; + case FLOW_ACTION_VLAN_PUSH: + attr->vlan_vid[vlan_idx] = act->vlan.vid; + attr->vlan_prio[vlan_idx] = act->vlan.prio; + attr->vlan_proto[vlan_idx] = act->vlan.proto; + if (!attr->vlan_proto[vlan_idx]) + attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); + + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + 
MLX5_FS_VLAN_DEPTH)) { + NL_SET_ERR_MSG_MOD(extack, + "vlan push action is not supported for vlan depth > 1"); + return -EOPNOTSUPP; + } + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; + } else { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && + (act->vlan.proto != htons(ETH_P_8021Q) || + act->vlan.prio)) { + NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported"); + return -EOPNOTSUPP; + } + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + } + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN"); + return -EINVAL; + } + + attr->total_vlan = vlan_idx + 1; + + return 0; +} + +int +mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct net_device **out_dev, + struct netlink_ext_ack *extack) +{ + struct net_device *vlan_dev = *out_dev; + struct flow_action_entry vlan_act = { + .id = FLOW_ACTION_VLAN_PUSH, + .vlan.vid = vlan_dev_vlan_id(vlan_dev), + .vlan.proto = vlan_dev_vlan_proto(vlan_dev), + .vlan.prio = 0, + }; + int err; + + err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack); + if (err) + return err; + + rcu_read_lock(); + *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev)); + rcu_read_unlock(); + if (!*out_dev) + return -ENODEV; + + if (is_vlan_dev(*out_dev)) + err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack); + + return err; +} + +int +mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack) +{ + struct flow_action_entry vlan_act = { + .id = FLOW_ACTION_VLAN_POP, + }; + int nest_level, err = 0; + + nest_level = attr->parse_attr->filter_dev->lower_level - + priv->netdev->lower_level; + while (nest_level--) { + err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, + extack); + if (err) + return err; + } + + return err; +} + +static bool +tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int err; + + if (act->id == FLOW_ACTION_VLAN_PUSH && + (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) { + /* Replace vlan pop+push with vlan modify */ + attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act, + attr->parse_attr, parse_state->hdrs, + &attr->action, parse_state->extack); + } else { + err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action, + parse_state->extack); + } + + if (err) + return err; + + esw_attr->split_count = esw_attr->out_count; + + return 0; +} + +static int +tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct pedit_headers_action *hdrs = parse_state->hdrs; + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int err; + + if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && + attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { + /* For prio tag mode, replace vlan pop with rewrite vlan prio + * tag rewrite. 
+ */ + attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs, + &attr->action, extack); + if (err) + return err; + } + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_vlan = { + .can_offload = tc_act_can_offload_vlan, + .parse_action = tc_act_parse_vlan, + .post_parse = tc_act_post_parse_vlan, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h new file mode 100644 index 000000000000..3d62f13ab61f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_EN_TC_ACT_VLAN_H__ +#define __MLX5_EN_TC_ACT_VLAN_H__ + +#include <net/flow_offload.h> +#include "en/tc_priv.h" + +struct pedit_headers_action; + +int +mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct net_device **out_dev, + struct netlink_ext_ack *extack); + +int +mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack); + +int +mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace, + const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack); + +#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c new file mode 100644 index 000000000000..63e36e7f53e3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
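One detail worth calling out in tc_act_parse_vlan() above: a pop immediately followed by a push is collapsed into a single VLAN header rewrite, since the hardware can apply that as one modify-header step instead of two VLAN ops. Below is a compact sketch of that decision in plain C; the flag names are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
	ACT_VLAN_POP  = 1u << 0,
	ACT_VLAN_PUSH = 1u << 1,
	ACT_MOD_HDR   = 1u << 2,	/* VID rewrite via modify header */
};

struct vlan_accum {
	uint32_t action;
	uint16_t rewrite_vid;	/* meaningful when ACT_MOD_HDR is set */
};

/* Feed pop/push ops in filter order. */
static void vlan_op(struct vlan_accum *st, bool push, uint16_t vid)
{
	if (push && (st->action & ACT_VLAN_POP)) {
		/* pop + push on the same flow -> rewrite the existing tag */
		st->action &= ~ACT_VLAN_POP;
		st->action |= ACT_MOD_HDR;
		st->rewrite_vid = vid;
		return;
	}
	st->action |= push ? ACT_VLAN_PUSH : ACT_VLAN_POP;
}

int main(void)
{
	struct vlan_accum st = { 0 };

	vlan_op(&st, false, 0);		/* vlan pop */
	vlan_op(&st, true, 100);	/* vlan push vid 100 */
	/* Prints action=0x4: pop+push became a single rewrite. */
	printf("action=%#x vid=%u\n", st.action, st.rewrite_vid);
	return 0;
}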
+ +#include <linux/if_vlan.h> +#include "act.h" +#include "vlan.h" +#include "en/tc_priv.h" + +struct pedit_headers_action; + +int +mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace, + const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack) +{ + u16 mask16 = VLAN_VID_MASK; + u16 val16 = act->vlan.vid & VLAN_VID_MASK; + const struct flow_action_entry pedit_act = { + .id = FLOW_ACTION_MANGLE, + .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH, + .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI), + .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16), + .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16), + }; + u8 match_prio_mask, match_prio_val; + void *headers_c, *headers_v; + int err; + + headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec); + headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec); + + if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) { + NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match"); + return -EOPNOTSUPP; + } + + match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio); + match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio); + if (act->vlan.prio != (match_prio_val & match_prio_mask)) { + NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported"); + return -EOPNOTSUPP; + } + + err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs, + NULL, extack); + *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + return err; +} + +static bool +tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index) +{ + return true; +} + +static int +tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + enum mlx5_flow_namespace_type ns_type; + int err; + + ns_type = mlx5e_get_flow_namespace(parse_state->flow); + err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act, + attr->parse_attr, parse_state->hdrs, + &attr->action, parse_state->extack); + if (err) + return err; + + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) + attr->esw_attr->split_count = attr->esw_attr->out_count; + + return 0; +} + +struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = { + .can_offload = tc_act_can_offload_vlan_mangle, + .parse_action = tc_act_parse_vlan_mangle, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index df6888c4793c..ff4b4f8a5a9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -5,6 +5,7 @@ #include <net/psample.h> #include "en/mapping.h" #include "en/tc/post_act.h" +#include "en/mod_hdr.h" #include "sample.h" #include "eswitch.h" #include "en_tc.h" @@ -255,12 +256,12 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id, goto err_modify_hdr; } - dealloc_mod_hdr_actions(&mod_acts); + mlx5e_mod_hdr_dealloc(&mod_acts); return modify_hdr; err_modify_hdr: err_post_act: - dealloc_mod_hdr_actions(&mod_acts); + mlx5e_mod_hdr_dealloc(&mod_acts); err_set_regc0: return ERR_PTR(err); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 
c1c6e74c79c4..4a0d38d219ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -14,6 +14,7 @@ #include <linux/workqueue.h> #include <linux/refcount.h> #include <linux/xarray.h> +#include <linux/if_macvlan.h> #include "lib/fs_chains.h" #include "en/tc_ct.h" @@ -36,6 +37,12 @@ #define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen) #define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0) +/* Statically allocate modify actions for + * ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10. + * This will be increased dynamically if needed (for the ipv6 snat + dnat). + */ +#define MLX5_CT_MIN_MOD_ACTS 10 + #define ct_dbg(fmt, args...)\ netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args) @@ -320,7 +327,33 @@ mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple, } static int -mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, +mlx5_tc_ct_get_flow_source_match(struct mlx5_tc_ct_priv *ct_priv, + struct net_device *ndev) +{ + struct mlx5e_priv *other_priv = netdev_priv(ndev); + struct mlx5_core_dev *mdev = ct_priv->dev; + bool vf_rep, uplink_rep; + + vf_rep = mlx5e_eswitch_vf_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev); + uplink_rep = mlx5e_eswitch_uplink_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev); + + if (vf_rep) + return MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; + if (uplink_rep) + return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + if (is_vlan_dev(ndev)) + return mlx5_tc_ct_get_flow_source_match(ct_priv, vlan_dev_real_dev(ndev)); + if (netif_is_macvlan(ndev)) + return mlx5_tc_ct_get_flow_source_match(ct_priv, macvlan_dev_real_dev(ndev)); + if (mlx5e_get_tc_tun(ndev) || netif_is_lag_master(ndev)) + return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; + + return MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT; +} + +static int +mlx5_tc_ct_set_tuple_match(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5_flow_spec *spec, struct flow_rule *rule) { void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -335,8 +368,7 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, flow_rule_match_basic(rule, &match); - mlx5e_tc_set_ethertype(priv->mdev, &match, true, headers_c, - headers_v); + mlx5e_tc_set_ethertype(ct_priv->dev, &match, true, headers_c, headers_v); MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, match.mask->ip_proto); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, @@ -432,6 +464,23 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, ntohs(match.key->flags)); } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); + + if (match.key->ingress_ifindex & match.mask->ingress_ifindex) { + struct net_device *dev; + + dev = dev_get_by_index(&init_net, match.key->ingress_ifindex); + if (dev && MLX5_CAP_ESW_FLOWTABLE(ct_priv->dev, flow_source)) + spec->flow_context.flow_source = + mlx5_tc_ct_get_flow_source_match(ct_priv, dev); + + dev_put(dev); + } + } + return 0; } @@ -609,22 +658,15 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv, struct flow_action *flow_action = &flow_rule->action; struct mlx5_core_dev *mdev = ct_priv->dev; struct flow_action_entry *act; - size_t action_size; char *modact; int err, i; - action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto); - flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_MANGLE: { - 
err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type, - mod_acts); - if (err) - return err; - - modact = mod_acts->actions + - mod_acts->num_actions * action_size; + modact = mlx5e_mod_hdr_alloc(mdev, ct_priv->ns_type, mod_acts); + if (IS_ERR(modact)) + return PTR_ERR(modact); err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact); if (err) @@ -652,7 +694,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, struct mlx5e_mod_hdr_handle **mh, u8 zone_restore_id, bool nat) { - struct mlx5e_tc_mod_hdr_acts mod_acts = {}; + DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS); + DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr); struct flow_action_entry *meta; u16 ct_state = 0; int err; @@ -706,11 +749,11 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, attr->modify_hdr = mlx5e_mod_hdr_get(*mh); } - dealloc_mod_hdr_actions(&mod_acts); + mlx5e_mod_hdr_dealloc(&mod_acts); return 0; err_mapping: - dealloc_mod_hdr_actions(&mod_acts); + mlx5e_mod_hdr_dealloc(&mod_acts); mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); return err; } @@ -770,7 +813,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) attr->esw_attr->in_mdev = priv->mdev; - mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule); + mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule); mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK); zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr); @@ -907,12 +950,9 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple rev_tuple = entry->tuple; struct mlx5_ct_counter *shared_counter; struct mlx5_ct_entry *rev_entry; - __be16 tmp_port; /* get the reversed tuple */ - tmp_port = rev_tuple.port.src; - rev_tuple.port.src = rev_tuple.port.dst; - rev_tuple.port.dst = tmp_port; + swap(rev_tuple.port.src, rev_tuple.port.dst); if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { __be32 tmp_addr = rev_tuple.ip.src_v4; @@ -1356,9 +1396,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv, int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { + bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; + int err; + if (!priv) { NL_SET_ERR_MSG_MOD(extack, "offload of ct action isn't available"); @@ -1369,6 +1413,17 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, attr->ct_attr.ct_action = act->ct.action; attr->ct_attr.nf_ft = act->ct.flow_table; + if (!clear_action) + goto out; + + err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear"); + return err; + } + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + +out: return 0; } @@ -1445,7 +1500,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft, } pre_ct->miss_rule = rule; - dealloc_mod_hdr_actions(&pre_mod_acts); + mlx5e_mod_hdr_dealloc(&pre_mod_acts); kvfree(spec); return 0; @@ -1454,7 +1509,7 @@ err_miss_rule: err_flow_rule: mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr); err_mapping: - dealloc_mod_hdr_actions(&pre_mod_acts); + mlx5e_mod_hdr_dealloc(&pre_mod_acts); kvfree(spec); return err; } @@ -1850,14 +1905,14 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } attr->ct_attr.ct_flow = ct_flow; - dealloc_mod_hdr_actions(&pre_mod_acts); + mlx5e_mod_hdr_dealloc(&pre_mod_acts); return 
ct_flow->pre_ct_rule; err_insert_orig: mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); err_mapping: - dealloc_mod_hdr_actions(&pre_mod_acts); + mlx5e_mod_hdr_dealloc(&pre_mod_acts); mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); err_get_chain: kfree(ct_flow->pre_ct_attr); @@ -1898,23 +1953,16 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv, memcpy(pre_ct_attr, attr, attr_sz); - err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0); - if (err) { - ct_dbg("Failed to set register for ct clear"); - goto err_set_registers; - } - mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type, mod_acts->num_actions, mod_acts->actions); if (IS_ERR(mod_hdr)) { err = PTR_ERR(mod_hdr); ct_dbg("Failed to add create ct clear mod hdr"); - goto err_set_registers; + goto err_mod_hdr; } pre_ct_attr->modify_hdr = mod_hdr; - pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr); if (IS_ERR(rule)) { @@ -1930,7 +1978,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv, err_insert: mlx5_modify_header_dealloc(priv->mdev, mod_hdr); -err_set_registers: +err_mod_hdr: netdev_warn(priv->netdev, "Failed to offload ct clear flow, err %d\n", err); kfree(pre_ct_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 363329f4aac6..99662af1e41a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -110,6 +110,7 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec); int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack); @@ -172,6 +173,7 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) static inline int mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_acts, const struct flow_action_entry *act, struct netlink_ext_ack *extack) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 8f64f2c8895a..f832c26ff2c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -5,11 +5,14 @@ #define __MLX5_EN_TC_PRIV_H__ #include "en_tc.h" +#include "en/tc/act/act.h" #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1) #define MLX5E_TC_MAX_SPLITS 1 +#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains) + enum { MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, @@ -37,6 +40,7 @@ struct mlx5e_tc_flow_parse_attr { struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; struct ethhdr eth; + struct mlx5e_tc_act_parse_state parse_state; }; /* Helper struct for accessing a struct containing list_head array. 
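Two patterns in the tc_ct.c hunks above deserve a note. First, mlx5_tc_ct_get_flow_source_match() classifies the ingress device by recursively unwrapping stacked devices (vlan, macvlan) until it reaches something that maps to a hardware flow source. A toy model of that recursion follows, with hypothetical types standing in for the real netdevice checks:

#include <stdio.h>

enum flow_source { SRC_ANY_VPORT, SRC_LOCAL_VPORT, SRC_UPLINK };

enum dev_kind { DEV_VF_REP, DEV_UPLINK_REP, DEV_VLAN, DEV_MACVLAN, DEV_OTHER };

struct toy_netdev {
	enum dev_kind kind;
	const struct toy_netdev *lower;	/* real device for vlan/macvlan */
};

static enum flow_source classify(const struct toy_netdev *dev)
{
	switch (dev->kind) {
	case DEV_VF_REP:
		return SRC_LOCAL_VPORT;
	case DEV_UPLINK_REP:
		return SRC_UPLINK;
	case DEV_VLAN:
	case DEV_MACVLAN:
		return classify(dev->lower);	/* peel one stacking layer */
	default:
		return SRC_ANY_VPORT;
	}
}

int main(void)
{
	const struct toy_netdev vf = { .kind = DEV_VF_REP, .lower = 0 };
	const struct toy_netdev vlan = { .kind = DEV_VLAN, .lower = &vf };

	printf("%d\n", classify(&vlan));	/* prints SRC_LOCAL_VPORT (1) */
	return 0;
}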
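Second, DECLARE_MOD_HDR_ACTS_ACTIONS/DECLARE_MOD_HDR_ACTS let the ct code start with MLX5_CT_MIN_MOD_ACTS (10) modify-header slots on the stack and spill to the heap only for oversized rules (the ipv6 snat + dnat case in the comment above). Here is a self-contained sketch of that stack-first, grow-on-demand buffer; the names are invented, not the mlx5 API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mod_acts {
	void *actions;
	size_t max, num, elem;
	int on_stack;		/* still using the declared array? */
};

#define DECLARE_MOD_ACTS(name, arr) \
	struct mod_acts name = { .actions = (arr), \
		.max = sizeof(arr) / sizeof((arr)[0]), \
		.elem = sizeof((arr)[0]), .on_stack = 1 }

/* Returns a pointer to the next free slot, growing to the heap if full. */
static void *mod_acts_alloc(struct mod_acts *m)
{
	if (m->num == m->max) {
		void *bigger = malloc(2 * m->max * m->elem);

		if (!bigger)
			return NULL;
		memcpy(bigger, m->actions, m->num * m->elem);
		if (!m->on_stack)
			free(m->actions);
		m->actions = bigger;
		m->max *= 2;
		m->on_stack = 0;
	}
	return (char *)m->actions + m->num++ * m->elem;
}

static void mod_acts_dealloc(struct mod_acts *m)
{
	if (!m->on_stack)
		free(m->actions);
	m->actions = NULL;
	m->num = m->max = 0;
}

int main(void)
{
	long slots[10];			/* like MLX5_CT_MIN_MOD_ACTS */
	DECLARE_MOD_ACTS(acts, slots);

	for (int i = 0; i < 12; i++)	/* 12 > 10 forces one heap spill */
		if (!mod_acts_alloc(&acts))
			return 1;
	printf("%zu acts, heap=%d\n", acts.num, !acts.on_stack);
	mod_acts_dealloc(&acts);
	return 0;
}

The common case then allocates nothing, which matters on the connection-tracking path where entries are created at high rate.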
@@ -102,6 +106,7 @@ struct mlx5e_tc_flow { refcount_t refcnt; struct rcu_head rcu_head; struct completion init_done; + struct completion del_hw_done; int tunnel_id; /* the mapped tunnel id of this flow */ struct mlx5_flow_attr *attr; }; @@ -114,7 +119,11 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr); +bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow); +bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow); bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow); +int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow); +bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv); static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) { @@ -175,4 +184,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow); struct mlx5e_tc_int_port_priv * mlx5e_get_int_port_priv(struct mlx5e_priv *priv); + +void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec); +void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec); + #endif /* __MLX5_EN_TC_PRIV_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index a5e450973225..33815246fead 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -103,7 +103,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, } static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, - struct net_device *mirred_dev, + struct net_device *dev, struct mlx5e_tc_tun_route_attr *attr) { struct net_device *route_dev; @@ -122,13 +122,13 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv, uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); attr->fl.fl4.flowi4_oif = uplink_dev->ifindex; } else { - struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev); + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev); if (tunnel && tunnel->get_remote_ifindex) - attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev); + attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(dev); } - rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4); + rt = ip_route_output_key(dev_net(dev), &attr->fl.fl4); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -440,10 +440,10 @@ release_neigh: #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv, - struct net_device *mirred_dev, + struct net_device *dev, struct mlx5e_tc_tun_route_attr *attr) { - struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev); + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev); struct net_device *route_dev; struct net_device *out_dev; struct dst_entry *dst; @@ -451,8 +451,8 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv, int ret; if (tunnel && tunnel->get_remote_ifindex) - attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev); - dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6, + attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(dev); + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(dev), NULL, &attr->fl.fl6, NULL); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -708,7 +708,8 @@ release_neigh: int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, - struct mlx5_flow_attr *flow_attr) + struct mlx5_flow_attr *flow_attr, + struct net_device *filter_dev) { struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr; struct 
mlx5e_tc_int_port *int_port; @@ -720,14 +721,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, /* Addresses are swapped for decap */ attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4; attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4; - err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr); + err = mlx5e_route_lookup_ipv4_get(priv, filter_dev, &attr); } #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) else if (flow_attr->tun_ip_version == 6) { /* Addresses are swapped for decap */ attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6; attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6; - err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr); + err = mlx5e_route_lookup_ipv6_get(priv, filter_dev, &attr); } #endif else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index aa092eaeaec3..b38f693bbb52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -94,7 +94,8 @@ mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, #endif int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, - struct mlx5_flow_attr *attr); + struct mlx5_flow_attr *attr, + struct net_device *filter_dev); bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 660cca73c36c..9918ed8c059b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -245,8 +245,14 @@ static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow, struct list_head *flow_list, int index) { - if (IS_ERR(mlx5e_flow_get(flow))) + if (IS_ERR(mlx5e_flow_get(flow))) { + /* Flow is being deleted concurrently. Wait for it to be + * unoffloaded from hardware, otherwise deleting encap will + * fail. 
+ */ + wait_for_completion(&flow->del_hw_done); return; + } wait_for_completion(&flow->init_done); flow->tmp_entry_index = index; @@ -1153,7 +1159,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv, tbl_time_before = mlx5e_route_tbl_get_last_update(priv); tbl_time_after = tbl_time_before; - err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr); + err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr, parse_attr->filter_dev); if (err || !esw_attr->rx_tun_attr->decap_vport) goto out; @@ -1474,7 +1480,7 @@ static void mlx5e_reoffload_decap(struct mlx5e_priv *priv, parse_attr = attr->parse_attr; spec = &parse_attr->spec; - err = mlx5e_tc_tun_route_lookup(priv, spec, attr); + err = mlx5e_tc_tun_route_lookup(priv, spec, attr, parse_attr->filter_dev); if (err) { mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n", err); @@ -1573,6 +1579,8 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv, struct net_device *fib_dev; fen_info = container_of(info, struct fib_entry_notifier_info, info); + if (fen_info->fi->nh) + return NULL; fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops || fen_info->dst_len != 32) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 2f0df5cc1a2d..338d65e2c9ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -151,7 +151,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, rq->stats->xdp_redirect++; return true; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rq->netdev, prog, act); fallthrough; case XDP_ABORTED: xdp_abort: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c index 7b562d2c8a19..279cd8f4e79f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c @@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv, { struct device *dev = mlx5_core_dma_dev(priv->mdev); - return xsk_pool_dma_map(pool, dev, 0); + return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC); } static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool) { - return xsk_pool_dma_unmap(pool, 0); + return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC); } static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 538bc2419bd8..25eac9e20342 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -4,6 +4,7 @@ #include "setup.h" #include "en/params.h" #include "en/txrx.h" +#include "en/health.h" /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may * change unexpectedly, and mlx5e has a minimum valid stride size for striding @@ -67,7 +68,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->xdpsq = &c->rq_xdpsq; rq->xsk_pool = pool; - rq->stats = &c->priv->channel_stats[c->ix].xskrq; + rq->stats = &c->priv->channel_stats[c->ix]->xskrq; rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK; err = mlx5e_rq_set_handlers(rq, params, xsk); @@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) void 
mlx5e_activate_xsk(struct mlx5e_channel *c) { + /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid + * activating XSKRQ in the middle of recovery. + */ + mlx5e_reporter_icosq_suspend_recovery(c); set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); + mlx5e_reporter_icosq_resume_recovery(c); + /* TX queue is created active. */ spin_lock_bh(&c->async_icosq_lock); @@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c) void mlx5e_deactivate_xsk(struct mlx5e_channel *c) { - mlx5e_deactivate_rq(&c->xskrq); + /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the + * middle of recovery. Suspend the recovery to avoid it. + */ + mlx5e_reporter_icosq_suspend_recovery(c); + clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); + mlx5e_reporter_icosq_resume_recovery(c); + synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */ + /* TX queue is disabled on close. */ } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index fb5397324aa4..2db9573a3fe6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2; - if (skb->protocol == htons(ETH_P_IPV6)) + if (inner_ip_hdr(skb)->version == 6) eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 62abce008c7b..96064a2033f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -55,6 +55,7 @@ struct mlx5e_ktls_offload_context_rx { DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); /* resync */ + spinlock_t lock; /* protects resync fields */ struct mlx5e_ktls_rx_resync_ctx resync; struct list_head list; }; @@ -99,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void) return resp_list; } -static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn) -{ - struct mlx5e_tir_builder *builder; - int err; - - builder = mlx5e_tir_builder_alloc(false); - if (!builder) - return -ENOMEM; - - mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false); - mlx5e_tir_builder_build_direct(builder); - mlx5e_tir_builder_build_tls(builder); - err = mlx5e_tir_init(tir, builder, mdev, false); - - mlx5e_tir_builder_free(builder); - - return err; -} - static void accel_rule_handle_work(struct work_struct *work) { struct mlx5e_ktls_offload_context_rx *priv_rx; @@ -386,14 +368,18 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r struct mlx5e_icosq *sq; bool trigger_poll; - memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); - sq = &c->async_icosq; ktls_resync = sq->ktls_resync; + trigger_poll = false; spin_lock_bh(&ktls_resync->lock); - list_add_tail(&priv_rx->list, &ktls_resync->list); - trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + spin_lock_bh(&priv_rx->lock); + memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); + if (list_empty(&priv_rx->list)) { + list_add_tail(&priv_rx->list, &ktls_resync->list); + trigger_poll = 
!test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state); + } + spin_unlock_bh(&priv_rx->lock); spin_unlock_bh(&ktls_resync->lock); if (!trigger_poll) @@ -604,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, struct mlx5_core_dev *mdev; struct mlx5e_priv *priv; int rxq, err; - u32 rqtn; tls_ctx = tls_get_ctx(sk); priv = netdev_priv(netdev); @@ -617,6 +602,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, if (err) goto err_create_key; + INIT_LIST_HEAD(&priv_rx->list); + spin_lock_init(&priv_rx->lock); priv_rx->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; @@ -624,13 +611,11 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->rxq = rxq; priv_rx->sk = sk; - priv_rx->rq_stats = &priv->channel_stats[rxq].rq; + priv_rx->rq_stats = &priv->channel_stats[rxq]->rq; priv_rx->sw_stats = &priv->tls->sw_stats; mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); - rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq); - - err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn); + err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir); if (err) goto err_create_tir; @@ -730,10 +715,14 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget) priv_rx = list_first_entry(&local_list, struct mlx5e_ktls_offload_context_rx, list); + spin_lock(&priv_rx->lock); cseg = post_static_params(sq, priv_rx); - if (IS_ERR(cseg)) + if (IS_ERR(cseg)) { + spin_unlock(&priv_rx->lock); break; - list_del(&priv_rx->list); + } + list_del_init(&priv_rx->list); + spin_unlock(&priv_rx->lock); db_cseg = cseg; } if (db_cseg) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index fe5d82fa6e92..49cca6bd49a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -556,7 +556,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); - priv->channel_stats[arfs_rule->rxq].rq.arfs_err++; + priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++; mlx5e_dbg(HW, priv, "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n", __func__, arfs_rule->filter_id, arfs_rule->rxq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c2ea5fad48dd..57d755db1cf5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -314,7 +314,9 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv, } static void mlx5e_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -380,7 +382,9 @@ unlock: } static int mlx5e_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -511,7 +515,8 @@ static int mlx5e_set_channels(struct net_device *dev, } int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal) { struct dim_cq_moder *rx_moder, *tx_moder; @@ -528,6 +533,11 @@ 
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, coal->tx_max_coalesced_frames = tx_moder->pkts; coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled; + kernel_coal->use_cqe_mode_rx = + MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER); + kernel_coal->use_cqe_mode_tx = + MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER); + return 0; } @@ -538,7 +548,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); - return mlx5e_ethtool_get_coalesce(priv, coal); + return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); } #define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD @@ -578,14 +588,26 @@ mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal } } +/* convert a boolean value of cq_mode to mlx5 period mode + * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE + * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE + */ +static int cqe_mode_to_period_mode(bool val) +{ + return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; +} + int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_params new_params; bool reset_rx, reset_tx; bool reset = true; + u8 cq_period_mode; int err = 0; if (!MLX5_CAP_GEN(mdev, cq_moderation)) @@ -605,6 +627,12 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, return -ERANGE; } + if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) && + !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) { + NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device"); + return -EOPNOTSUPP; + } + mutex_lock(&priv->state_lock); new_params = priv->channels.params; @@ -621,6 +649,18 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; + cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx); + if (cq_period_mode != rx_moder->cq_period_mode) { + mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); + reset_rx = true; + } + + cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx); + if (cq_period_mode != tx_moder->cq_period_mode) { + mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); + reset_tx = true; + } + if (reset_rx) { u8 mode = MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER); @@ -656,9 +696,9 @@ static int mlx5e_set_coalesce(struct net_device *netdev, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { - struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_priv *priv = netdev_priv(netdev); - return mlx5e_ethtool_set_coalesce(priv, coal); + return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack); } static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, @@ -1843,24 +1883,19 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, bool is_rx_cq) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_params new_params; - bool mode_changed; u8 cq_period_mode, current_cq_period_mode; + struct mlx5e_params new_params; + + if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) + return 
-EOPNOTSUPP; + + cq_period_mode = cqe_mode_to_period_mode(enable); - cq_period_mode = enable ? - MLX5_CQ_PERIOD_MODE_START_FROM_CQE : - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; current_cq_period_mode = is_rx_cq ? priv->channels.params.rx_cq_moderation.cq_period_mode : priv->channels.params.tx_cq_moderation.cq_period_mode; - mode_changed = cq_period_mode != current_cq_period_mode; - - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && - !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) - return -EOPNOTSUPP; - if (!mode_changed) + if (cq_period_mode == current_cq_period_mode) return 0; new_params = priv->channels.params; @@ -1894,7 +1929,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val if (curr_val == new_val) return 0; - if (new_val && !priv->profile->rx_ptp_support && rx_filter) { + if (new_val && !mlx5e_profile_feature_cap(priv->profile, PTP_RX) && rx_filter) { netdev_err(priv->netdev, "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n"); return -EINVAL; @@ -2358,7 +2393,8 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev, const struct ethtool_ops mlx5e_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | - ETHTOOL_COALESCE_USE_ADAPTIVE, + ETHTOOL_COALESCE_USE_ADAPTIVE | + ETHTOOL_COALESCE_USE_CQE, .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ext_state = mlx5e_get_link_ext_state, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 65571593ec5c..ac69e0aa09bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -37,6 +37,7 @@ #include <net/geneve.h> #include <linux/bpf.h> #include <linux/if_bridge.h> +#include <linux/filter.h> #include <net/page_pool.h> #include <net/xdp_sock_drv.h> #include "eswitch.h" @@ -479,7 +480,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->xdpsq = &c->rq_xdpsq; - rq->stats = &c->priv->channel_stats[c->ix].rq; + rq->stats = &c->priv->channel_stats[c->ix]->rq; rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); err = mlx5e_rq_set_handlers(rq, params, NULL); if (err) @@ -1087,8 +1088,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq) { cancel_work_sync(&rq->dim.work); - if (rq->icosq) - cancel_work_sync(&rq->icosq->recover_work); cancel_work_sync(&rq->recover_work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); @@ -1161,10 +1160,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->xsk_pool = xsk_pool; sq->stats = sq->xsk_pool ? - &c->priv->channel_stats[c->ix].xsksq : + &c->priv->channel_stats[c->ix]->xsksq : is_redirect ? - &c->priv->channel_stats[c->ix].xdpsq : - &c->priv->channel_stats[c->ix].rq_xdpsq; + &c->priv->channel_stats[c->ix]->xdpsq : + &c->priv->channel_stats[c->ix]->rq_xdpsq; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1216,9 +1215,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work) mlx5e_reporter_icosq_cqe_err(sq); } +static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work) +{ + struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq, + recover_work); + + /* Not implemented yet. 
*/ + + netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n"); +} + static int mlx5e_alloc_icosq(struct mlx5e_channel *c, struct mlx5e_sq_param *param, - struct mlx5e_icosq *sq) + struct mlx5e_icosq *sq, + work_func_t recover_work_func) { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; @@ -1239,7 +1249,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, if (err) goto err_sq_wq_destroy; - INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work); + INIT_WORK(&sq->recover_work, recover_work_func); return 0; @@ -1575,13 +1585,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) mlx5e_reporter_tx_err_cqe(sq); } -int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, - struct mlx5e_sq_param *param, struct mlx5e_icosq *sq) +static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, + struct mlx5e_sq_param *param, struct mlx5e_icosq *sq, + work_func_t recover_work_func) { struct mlx5e_create_sq_param csp = {}; int err; - err = mlx5e_alloc_icosq(c, param, sq); + err = mlx5e_alloc_icosq(c, param, sq, recover_work_func); if (err) return err; @@ -1620,7 +1631,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq) synchronize_net(); /* Sync with NAPI. */ } -void mlx5e_close_icosq(struct mlx5e_icosq *sq) +static void mlx5e_close_icosq(struct mlx5e_icosq *sq) { struct mlx5e_channel *c = sq->channel; @@ -1928,7 +1939,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, params, &cparam->txq_sq, &c->sq[tc], tc, qos_queue_group_id, - &c->priv->channel_stats[c->ix].sq[tc]); + &c->priv->channel_stats[c->ix]->sq[tc]); if (err) goto err_close_sqs; } @@ -2084,11 +2095,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, spin_lock_init(&c->async_icosq_lock); - err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq); + err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq, + mlx5e_async_icosq_err_cqe_work); if (err) goto err_close_xdpsq_cq; - err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); + mutex_init(&c->icosq_recovery_lock); + + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq, + mlx5e_icosq_err_cqe_work); if (err) goto err_close_async_icosq; @@ -2156,9 +2171,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c) mlx5e_close_xdpsq(&c->xdpsq); if (c->xdp) mlx5e_close_xdpsq(&c->rq_xdpsq); + /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */ + cancel_work_sync(&c->icosq.recover_work); mlx5e_close_rq(&c->rq); mlx5e_close_sqs(c); mlx5e_close_icosq(&c->icosq); + mutex_destroy(&c->icosq_recovery_lock); mlx5e_close_icosq(&c->async_icosq); if (c->xdp) mlx5e_close_cq(&c->rq_xdpsq.cq); @@ -2176,6 +2194,30 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix) return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev); } +static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu) +{ + if (ix > priv->stats_nch) { + netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix, + priv->stats_nch); + return -EINVAL; + } + + if (priv->channel_stats[ix]) + return 0; + + /* Asymmetric dynamic memory allocation. + * Freed in mlx5e_priv_arrays_free, not on channel closure. 
+ */ + mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix); + priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats), + GFP_KERNEL, cpu_to_node(cpu)); + if (!priv->channel_stats[ix]) + return -ENOMEM; + priv->stats_nch++; + + return 0; +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_params *params, struct mlx5e_channel_param *cparam, @@ -2193,6 +2235,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) return err; + err = mlx5e_channel_stats_alloc(priv, ix, cpu); + if (err) + return err; + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -2207,7 +2253,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); c->num_tc = mlx5e_get_dcb_num_tc(params); c->xdp = !!params->xdp_prog; - c->stats = &priv->channel_stats[ix].ch; + c->stats = &priv->channel_stats[ix]->ch; c->aff_mask = irq_get_effective_affinity_mask(irq); c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); @@ -2598,7 +2644,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, } } -int mlx5e_num_channels_changed(struct mlx5e_priv *priv) +static int mlx5e_num_channels_changed(struct mlx5e_priv *priv) { u16 count = priv->channels.params.num_channels; int err; @@ -3371,7 +3417,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) int i; for (i = 0; i < priv->stats_nch; i++) { - struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; + struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i]; struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; int j; @@ -3559,11 +3605,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable) new_params = priv->channels.params; if (enable) { - if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { - netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n"); - err = -EINVAL; - goto out; - } new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO; new_params.packet_merge.shampo.match_criteria_type = MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED; @@ -3724,12 +3765,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable) static int mlx5e_handle_feature(struct net_device *netdev, netdev_features_t *features, - netdev_features_t wanted_features, netdev_features_t feature, mlx5e_feature_handler feature_handler) { - netdev_features_t changes = wanted_features ^ netdev->features; - bool enable = !!(wanted_features & feature); + netdev_features_t changes = *features ^ netdev->features; + bool enable = !!(*features & feature); int err; if (!(changes & feature)) @@ -3737,22 +3777,22 @@ static int mlx5e_handle_feature(struct net_device *netdev, err = feature_handler(netdev, enable); if (err) { + MLX5E_SET_FEATURE(features, feature, !enable); netdev_err(netdev, "%s feature %pNF failed, err %d\n", enable ? 
"Enable" : "Disable", &feature, err); return err; } - MLX5E_SET_FEATURE(features, feature, enable); return 0; } int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { - netdev_features_t oper_features = netdev->features; + netdev_features_t oper_features = features; int err = 0; #define MLX5E_HANDLE_FEATURE(feature, handler) \ - mlx5e_handle_feature(netdev, &oper_features, features, feature, handler) + mlx5e_handle_feature(netdev, &oper_features, feature, handler) err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); @@ -3826,6 +3866,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, features &= ~NETIF_F_RXHASH; if (netdev->features & NETIF_F_RXHASH) netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n"); + + if (features & NETIF_F_GRO_HW) { + netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n"); + features &= ~NETIF_F_GRO_HW; + } } if (mlx5e_is_uplink_rep(priv)) @@ -4038,7 +4083,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) goto err_unlock; } - if (!priv->profile->rx_ptp_support) + if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) err = mlx5e_hwstamp_config_no_ptp_rx(priv, config.rx_filter != HWTSTAMP_FILTER_NONE); else @@ -4773,15 +4818,22 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) } if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; } if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) { - netdev->hw_features |= NETIF_F_GSO_GRE; - netdev->hw_enc_features |= NETIF_F_GSO_GRE; - netdev->gso_partial_features |= NETIF_F_GSO_GRE; + netdev->hw_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM; + netdev->gso_partial_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM; } if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) { @@ -5093,9 +5145,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_nic_stats_grps, .stats_grps_num = mlx5e_nic_stats_grps_num, - .rx_ptp_support = true, + .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) | + BIT(MLX5E_PROFILE_FEATURE_PTP_TX) | + BIT(MLX5E_PROFILE_FEATURE_QOS_HTB), }; +static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile) +{ + int nch; + + nch = mlx5e_get_max_num_channels(mdev); + + if (profile->max_nch_limit) + nch = min_t(int, nch, profile->max_nch_limit(mdev)); + return nch; +} + static unsigned int mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev, const struct mlx5e_profile *profile) @@ -5104,7 +5170,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev, unsigned int max_nch, tmp; /* core resources */ - max_nch = mlx5e_get_max_num_channels(mdev); + max_nch = mlx5e_profile_max_num_channels(mdev, profile); /* netdev rx queues */ tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); @@ -5128,12 
+5194,17 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, struct net_device *netdev, struct mlx5_core_dev *mdev) { + int nch, num_txqs, node, i; + + num_txqs = netdev->num_tx_queues; + nch = mlx5e_calc_max_nch(mdev, netdev, profile); + node = dev_to_node(mlx5_core_dma_dev(mdev)); + /* priv init */ priv->mdev = mdev; priv->netdev = netdev; priv->msglevel = MLX5E_MSG_LEVEL; - priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile); - priv->stats_nch = priv->max_nch; + priv->max_nch = nch; priv->max_opened_tc = 1; if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL)) @@ -5150,11 +5221,46 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, if (!priv->wq) goto err_free_cpumask; + priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node); + if (!priv->txq2sq) + goto err_destroy_workqueue; + + priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node); + if (!priv->tx_rates) + goto err_free_txq2sq; + + priv->channel_tc2realtxq = + kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node); + if (!priv->channel_tc2realtxq) + goto err_free_tx_rates; + + for (i = 0; i < nch; i++) { + priv->channel_tc2realtxq[i] = + kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq), + GFP_KERNEL, node); + if (!priv->channel_tc2realtxq[i]) + goto err_free_channel_tc2realtxq; + } + + priv->channel_stats = + kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node); + if (!priv->channel_stats) + goto err_free_channel_tc2realtxq; + return 0; +err_free_channel_tc2realtxq: + while (--i >= 0) + kfree(priv->channel_tc2realtxq[i]); + kfree(priv->channel_tc2realtxq); +err_free_tx_rates: + kfree(priv->tx_rates); +err_free_txq2sq: + kfree(priv->txq2sq); +err_destroy_workqueue: + destroy_workqueue(priv->wq); err_free_cpumask: free_cpumask_var(priv->scratchpad.cpumask); - return -ENOMEM; } @@ -5166,6 +5272,14 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) if (!priv->mdev) return; + for (i = 0; i < priv->stats_nch; i++) + kvfree(priv->channel_stats[i]); + kfree(priv->channel_stats); + for (i = 0; i < priv->max_nch; i++) + kfree(priv->channel_tc2realtxq[i]); + kfree(priv->channel_tc2realtxq); + kfree(priv->tx_rates); + kfree(priv->txq2sq); destroy_workqueue(priv->wq); free_cpumask_var(priv->scratchpad.cpumask); @@ -5181,13 +5295,44 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) memset(priv, 0, sizeof(*priv)); } +static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile) +{ + unsigned int nch, ptp_txqs, qos_txqs; + + nch = mlx5e_profile_max_num_channels(mdev, profile); + + ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) && + mlx5e_profile_feature_cap(profile, PTP_TX) ? + profile->max_tc : 0; + + qos_txqs = mlx5_qos_is_supported(mdev) && + mlx5e_profile_feature_cap(profile, QOS_HTB) ? 
+ mlx5e_qos_max_leaf_nodes(mdev) : 0; + + return nch * profile->max_tc + ptp_txqs + qos_txqs; +} + +static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile) +{ + unsigned int nch; + + nch = mlx5e_profile_max_num_channels(mdev, profile); + + return nch * profile->rq_groups; +} + struct net_device * -mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, - unsigned int txqs, unsigned int rxqs) +mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile) { struct net_device *netdev; + unsigned int txqs, rxqs; int err; + txqs = mlx5e_get_max_num_txqs(mdev, profile); + rxqs = mlx5e_get_max_num_rxqs(mdev, profile); + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); @@ -5432,22 +5577,10 @@ static int mlx5e_probe(struct auxiliary_device *adev, struct mlx5_core_dev *mdev = edev->mdev; struct net_device *netdev; pm_message_t state = {}; - unsigned int txqs, rxqs, ptp_txqs = 0; struct mlx5e_priv *priv; - int qos_sqs = 0; int err; - int nch; - - if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) - ptp_txqs = profile->max_tc; - - if (mlx5_qos_is_supported(mdev)) - qos_sqs = mlx5e_qos_max_leaf_nodes(mdev); - nch = mlx5e_get_max_num_channels(mdev); - txqs = nch * profile->max_tc + ptp_txqs + qos_sqs; - rxqs = nch * profile->rq_groups; - netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs); + netdev = mlx5e_create_netdev(mdev, profile); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e58a9ec42553..06d1f46f1688 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -50,6 +50,7 @@ #include "fs_core.h" #include "lib/mlx5.h" #include "lib/devcom.h" +#include "lib/vxlan.h" #define CREATE_TRACE_POINTS #include "diag/en_rep_tracepoint.h" #include "en_accel/ipsec.h" @@ -219,16 +220,22 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) } } -static void mlx5e_rep_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) +static void +mlx5e_rep_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(dev); mlx5e_ethtool_get_ringparam(priv, param); } -static int mlx5e_rep_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) +static int +mlx5e_rep_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -258,7 +265,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); - return mlx5e_ethtool_get_coalesce(priv, coal); + return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); } static int mlx5e_rep_set_coalesce(struct net_device *netdev, @@ -268,7 +275,7 @@ static int mlx5e_rep_set_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); - return mlx5e_ethtool_set_coalesce(priv, coal); + return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack); } static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev) @@ -585,6 +592,12 @@ bool mlx5e_eswitch_vf_rep(const struct 
net_device *netdev) return netdev->netdev_ops == &mlx5e_netdev_ops_rep; } +static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev) +{ + return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) / + mlx5_eswitch_get_total_vports(mdev); +} + static void mlx5e_build_rep_params(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1027,6 +1040,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(netdev)) mlx5e_open(netdev); + udp_tunnel_nic_reset_ntf(priv->netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -1048,6 +1062,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) mlx5_notifier_unregister(mdev, &priv->events_nb); mlx5e_rep_tc_disable(priv); mlx5_lag_remove_netdev(mdev, priv->netdev); + mlx5_vxlan_reset_to_default(mdev->vxlan); } static MLX5E_DEFINE_STATS_GRP(sw_rep, 0); @@ -1080,6 +1095,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = { &MLX5E_STATS_GRP(pme), &MLX5E_STATS_GRP(channels), &MLX5E_STATS_GRP(per_port_buff_congest), +#ifdef CONFIG_MLX5_EN_IPSEC + &MLX5E_STATS_GRP(ipsec_sw), + &MLX5E_STATS_GRP(ipsec_hw), +#endif }; static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv) @@ -1103,7 +1122,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5e_rep_stats_grps, .stats_grps_num = mlx5e_rep_stats_grps_num, - .rx_ptp_support = false, + .max_nch_limit = mlx5e_rep_max_nch_limit, }; static const struct mlx5e_profile mlx5e_uplink_rep_profile = { @@ -1124,7 +1143,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_ul_rep_stats_grps, .stats_grps_num = mlx5e_ul_rep_stats_grps_num, - .rx_ptp_support = false, }; /* e-Switch vport representors */ @@ -1175,14 +1193,10 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) struct devlink_port *dl_port; struct net_device *netdev; struct mlx5e_priv *priv; - unsigned int txqs, rxqs; - int nch, err; + int err; profile = &mlx5e_rep_profile; - nch = mlx5e_get_max_num_channels(dev); - txqs = nch * profile->max_tc; - rxqs = nch * profile->rq_groups; - netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs); + netdev = mlx5e_create_netdev(dev, profile); if (!netdev) { mlx5_core_warn(dev, "Failed to create representor netdev for vport %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 96967b0a2441..e86ccc22fb82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -37,6 +37,7 @@ #include <net/ip6_checksum.h> #include <net/page_pool.h> #include <net/inet_ecn.h> +#include <net/gro.h> #include <net/udp.h> #include <net/tcp.h> #include "en.h" @@ -278,8 +279,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, if (unlikely(!dma_info->page)) return -ENOMEM; - dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, - PAGE_SIZE, rq->buff.map_dir); + dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE, + rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { page_pool_recycle_direct(rq->page_pool, dma_info->page); dma_info->page = NULL; @@ -300,7 +301,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { - dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); 
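An editorial aside on the DMA change in this en_rx.c hunk: mapping RX pages with DMA_ATTR_SKIP_CPU_SYNC (and, as the replacement line below shows, unmapping them the same way) skips full-page cache maintenance for pages the page pool recycles without the CPU ever writing them; the driver instead syncs only the bytes the NIC actually delivered. A minimal sketch of the pattern, assuming a generic struct device and hypothetical helper names (rx_map_page(), rx_sync_fragment(), rx_unmap_page()); this is not the driver's code:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map an RX page without syncing CPU caches: the CPU has not written to
 * the page, and the region the NIC fills is synced explicitly per packet.
 */
static dma_addr_t rx_map_page(struct device *dev, struct page *page)
{
	return dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
}

/* At completion time, sync only the received fragment for the CPU. */
static void rx_sync_fragment(struct device *dev, dma_addr_t addr,
			     unsigned int frag_offset, unsigned int frag_len)
{
	dma_sync_single_range_for_cpu(dev, addr, frag_offset, frag_len,
				      DMA_FROM_DEVICE);
}

/* Unmap with the same attribute so teardown also skips the full-page sync. */
static void rx_unmap_page(struct device *dev, dma_addr_t addr)
{
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}

The payoff is on architectures with non-coherent DMA, where mapping or unmapping a whole page otherwise costs a cache walk even when only a small fragment was received.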
+ dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir, + DMA_ATTR_SKIP_CPU_SYNC); } void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, @@ -543,13 +545,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, u16 klm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries; + u16 entries, pi, header_offset, err, wqe_bbs, new_entries; u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; struct page *page = shampo->last_page; u64 addr = shampo->last_addr; struct mlx5e_dma_info *dma_info; struct mlx5e_umr_wqe *umr_wqe; - int headroom; + int headroom, i; headroom = rq->buff.headroom; new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); @@ -601,9 +603,7 @@ update_klm: err_unmap: while (--i >= 0) { - if (--index < 0) - index = shampo->hd_per_wq - 1; - dma_info = &shampo->info[index]; + dma_info = &shampo->info[--index]; if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); mlx5e_page_release(rq, dma_info, true); @@ -620,7 +620,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) struct mlx5e_icosq *sq = rq->icosq; int i, err, max_klm_entries, len; - max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); + max_klm_entries = MLX5E_MAX_KLM_PER_WQE; klm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); @@ -1604,6 +1604,12 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } } +static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + trigger_report(rq, cqe); + rq->stats->wqe_err++; +} + static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; @@ -1617,8 +1623,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) cqe_bcnt = be32_to_cpu(cqe->byte_cnt); if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - trigger_report(rq, cqe); - rq->stats->wqe_err++; + mlx5e_handle_rx_err_cqe(rq, cqe); goto free_wqe; } @@ -1671,7 +1676,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) cqe_bcnt = be32_to_cpu(cqe->byte_cnt); if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - rq->stats->wqe_err++; + mlx5e_handle_rx_err_cqe(rq, cqe); goto free_wqe; } @@ -1720,8 +1725,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 wi->consumed_strides += cstrides; if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - trigger_report(rq, cqe); - rq->stats->wqe_err++; + mlx5e_handle_rx_err_cqe(rq, cqe); goto mpwrq_cqe_out; } @@ -1989,8 +1993,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq wi->consumed_strides += cstrides; if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - trigger_report(rq, cqe); - stats->wqe_err++; + mlx5e_handle_rx_err_cqe(rq, cqe); goto mpwrq_cqe_out; } @@ -2059,8 +2062,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq wi->consumed_strides += cstrides; if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - trigger_report(rq, cqe); - rq->stats->wqe_err++; + mlx5e_handle_rx_err_cqe(rq, cqe); goto mpwrq_cqe_out; } @@ -2190,7 +2192,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, priv = mlx5i_epriv(netdev); tstamp = &priv->tstamp; - stats = &priv->channel_stats[rq->ix].rq; + stats = rq->stats; flags_rqpn = be32_to_cpu(cqe->flags_rqpn); g = (flags_rqpn >> 28) & 3; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 2a9bfc3ffa2e..26e326fe503c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -35,6 +35,7 @@ #include "en_accel/tls.h" #include "en_accel/en_accel.h" #include "en/ptp.h" +#include "en/port.h" static unsigned int stats_grps_num(struct mlx5e_priv *priv) { @@ -463,7 +464,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) for (i = 0; i < priv->stats_nch; i++) { struct mlx5e_channel_stats *channel_stats = - &priv->channel_stats[i]; + priv->channel_stats[i]; int j; mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq); @@ -1158,12 +1159,99 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } -void mlx5e_stats_fec_get(struct mlx5e_priv *priv, - struct ethtool_fec_stats *fec_stats) +static int fec_num_lanes(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {}; + int err; + + MLX5_SET(pmlp_reg, in, local_port, 1); + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_PMLP, 0, 0); + if (err) + return 0; + + return MLX5_GET(pmlp_reg, out, width); +} + +static int fec_active_mode(struct mlx5_core_dev *mdev) +{ + unsigned long fec_active_long; + u32 fec_active; + + if (mlx5e_get_fec_mode(mdev, &fec_active, NULL)) + return MLX5E_FEC_NOFEC; + + fec_active_long = fec_active; + return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE); +} + +#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \ + fec_stats->corrected_blocks.lanes[(idx)] = \ + MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \ + fc_fec_corrected_blocks_lane##idx); \ + fec_stats->uncorrectable_blocks.lanes[(idx)] = \ + MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \ + fc_fec_uncorrectable_blocks_lane##idx); \ +}) + +static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats, + u32 *ppcnt, u8 lanes) +{ + if (lanes > 3) { /* 4 lanes */ + MLX5E_STATS_SET_FEC_BLOCK(3); + MLX5E_STATS_SET_FEC_BLOCK(2); + } + if (lanes > 1) /* 2 lanes */ + MLX5E_STATS_SET_FEC_BLOCK(1); + if (lanes > 0) /* 1 lane */ + MLX5E_STATS_SET_FEC_BLOCK(0); +} + +static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt) +{ + fec_stats->corrected_blocks.total = + MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, + rs_fec_corrected_blocks); + fec_stats->uncorrectable_blocks.total = + MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, + rs_fec_uncorrectable_blocks); +} + +static void fec_set_block_stats(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + int mode = fec_active_mode(mdev); + + if (mode == MLX5E_FEC_NOFEC) + return; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); + if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0)) + return; + + switch (mode) { + case MLX5E_FEC_RS_528_514: + case MLX5E_FEC_RS_544_514: + case MLX5E_FEC_LLRS_272_257_1: + fec_set_rs_stats(fec_stats, out); + return; + case MLX5E_FEC_FIRECODE: + fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev)); + } +} + +static void fec_set_corrected_bits_total(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats) { u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)]; struct mlx5_core_dev *mdev = priv->mdev; - u32 
in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) @@ -1181,6 +1269,13 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv, phy_corrected_bits); } +void mlx5e_stats_fec_get(struct mlx5e_priv *priv, + struct ethtool_fec_stats *fec_stats) +{ + fec_set_corrected_bits_total(priv, fec_stats); + fec_set_block_stats(priv, fec_stats); +} + #define PPORT_ETH_EXT_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_extended_cntrs_grp_data_layout.c##_high) @@ -2076,7 +2171,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp) for (i = 0; i < NUM_PTP_CH_STATS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, - ptp_ch_stats_desc[i].format); + "%s", ptp_ch_stats_desc[i].format); if (priv->tx_ptp_opened) { for (tc = 0; tc < priv->max_opened_tc; tc++) @@ -2197,21 +2292,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch, ch_stats_desc, j); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq, rq_stats_desc, j); for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq, xskrq_stats_desc, j); for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq, rq_xdpsq_stats_desc, j); } @@ -2219,17 +2314,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc], sq_stats_desc, j); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq, xsksq_stats_desc, j); for (j = 0; j < NUM_XDPSQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq, xdpsq_stats_desc, j); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 835caa1c7b74..3d908a7e1406 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -39,10 +39,6 @@ #include <linux/rhashtable.h> #include <linux/refcount.h> #include <linux/completion.h> -#include <linux/if_macvlan.h> -#include <net/tc_act/tc_pedit.h> -#include <net/tc_act/tc_csum.h> -#include <net/psample.h> #include <net/arp.h> #include <net/ipv6_stubs.h> #include <net/bareudp.h> @@ -62,6 +58,7 @@ #include "en/mod_hdr.h" #include "en/tc_tun_encap.h" #include "en/tc/sample.h" +#include "en/tc/act/act.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "lib/fs_chains.h" @@ -70,9 +67,6 @@ #include "lag/lag.h" #include "lag/mp.h" -#define nic_chains(priv) ((priv)->fs.tc.chains) -#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) - #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18) @@ -209,12 +203,9 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, char 
*modact; int err; - err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts); - if (err) - return err; - - modact = mod_hdr_acts->actions + - (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ); + modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts); + if (IS_ERR(modact)) + return PTR_ERR(modact); /* Firmware has 5bit length field and 0 means 32bits */ if (mlen == 32) @@ -333,7 +324,7 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev, int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen; char *modact; - modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ); + modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id); /* Firmware has 5bit length field and 0 means 32bits */ if (mlen == 32) @@ -403,7 +394,7 @@ bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) return flow_flag_test(flow, ESWITCH); } -static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow) +bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow) { return flow_flag_test(flow, FT); } @@ -413,7 +404,7 @@ bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) return flow_flag_test(flow, OFFLOADED); } -static int get_flow_name_space(struct mlx5e_tc_flow *flow) +int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow) { return mlx5e_is_eswitch_flow(flow) ? MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL; @@ -424,7 +415,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ? + return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr : &priv->fs.tc.mod_hdr; } @@ -437,7 +428,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, struct mlx5e_mod_hdr_handle *mh; mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow), - get_flow_name_space(flow), + mlx5e_get_flow_namespace(flow), &parse_attr->mod_hdr_acts); if (IS_ERR(mh)) return PTR_ERR(mh); @@ -941,7 +932,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { struct mlx5_flow_context *flow_context = &spec->flow_context; - struct mlx5_fs_chains *nic_chains = nic_chains(priv); + struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr; struct mlx5e_tc_table *tc = &priv->fs.tc; struct mlx5_flow_destination dest[2] = {}; @@ -1076,7 +1067,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); if (err) return err; } @@ -1095,7 +1086,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr) { - struct mlx5_fs_chains *nic_chains = nic_chains(priv); + struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); mlx5_del_flow_rules(rule); @@ -1127,21 +1118,21 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, mutex_lock(&priv->fs.tc.t_lock); if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && !IS_ERR_OR_NULL(tc->t)) { - mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL); + mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL); priv->fs.tc.t = NULL; } mutex_unlock(&priv->fs.tc.t_lock); - kvfree(attr->parse_attr); - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); - mlx5_fc_destroy(priv->mdev, attr->counter); + if (attr->action & 
MLX5_FLOW_CONTEXT_ACTION_COUNT) + mlx5_fc_destroy(priv->mdev, attr->counter); if (flow_flag_test(flow, HAIRPIN)) mlx5e_hairpin_flow_del(priv, flow); + kvfree(attr->parse_attr); kfree(flow->attr); } @@ -1196,21 +1187,16 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) goto offload_rule_0; - if (flow_flag_test(flow, CT)) { - mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr); - return; - } - - if (flow_flag_test(flow, SAMPLE)) { - mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); - return; - } - if (attr->esw_attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); + if (flow_flag_test(flow, CT)) + mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr); + else if (flow_flag_test(flow, SAMPLE)) + mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); + else offload_rule_0: - mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } struct mlx5_flow_handle * @@ -1308,8 +1294,6 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow) mutex_unlock(&uplink_priv->unready_flows_lock); } -static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv); - bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev) { struct mlx5_core_dev *out_mdev, *route_mdev; @@ -1324,7 +1308,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ route_mdev->coredev_type != MLX5_COREDEV_VF) return false; - return same_hw_devs(out_priv, route_priv); + return mlx5e_same_hw_devs(out_priv, route_priv); } int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport) @@ -1371,7 +1355,7 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, struct mlx5_modify_hdr *mod_hdr; mod_hdr = mlx5_modify_header_alloc(priv->mdev, - get_flow_name_space(flow), + mlx5e_get_flow_namespace(flow), mod_hdr_acts->num_actions, mod_hdr_acts->actions); if (IS_ERR(mod_hdr)) @@ -1445,7 +1429,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG, metadata); if (err) - return err; + goto err_out; + + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; } } @@ -1461,13 +1447,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (attr->chain) { NL_SET_ERR_MSG_MOD(extack, "Internal port rule is only supported on chain 0"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } if (attr->dest_chain) { NL_SET_ERR_MSG_MOD(extack, "Internal port rule offload doesn't support goto action"); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), @@ -1475,8 +1463,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, flow_flag_test(flow, EGRESS) ? 
MLX5E_TC_INT_PORT_EGRESS : MLX5E_TC_INT_PORT_INGRESS); - if (IS_ERR(int_port)) - return PTR_ERR(int_port); + if (IS_ERR(int_port)) { + err = PTR_ERR(int_port); + goto err_out; + } esw_attr->int_port = int_port; }
@@ -1600,6 +1590,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, else mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); } + complete_all(&flow->del_hw_done); if (mlx5_flow_has_geneve_opt(flow)) mlx5_geneve_tlv_option_del(priv->mdev->geneve);
@@ -1623,15 +1614,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { - dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts); + mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); if (vf_tun && attr->modify_hdr) mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr); else mlx5e_detach_mod_hdr(priv, flow); } - kfree(attr->sample_attr); - kvfree(attr->parse_attr); - kvfree(attr->esw_attr->rx_tun_attr); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
@@ -1645,6 +1633,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); + kfree(attr->sample_attr); + kvfree(attr->esw_attr->rx_tun_attr); + kvfree(attr->parse_attr); kfree(flow->attr); }
@@ -1947,6 +1938,111 @@ u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer) return ip_version; }
+/* The tunnel device follows RFC 6040 (see include/net/inet_ecn.h) and changes the
+ * inner ip_ecn depending on the inner and outer ip_ecn as follows:
+ * +---------+----------------------------------------+
+ * |Arriving | Arriving Outer Header |
+ * | Inner +---------+---------+---------+----------+
+ * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
+ * +---------+---------+---------+---------+----------+
+ * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> |
+ * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* |
+ * | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* |
+ * | CE | CE | CE | CE | CE |
+ * +---------+---------+---------+---------+----------+
+ *
+ * TC matches on the inner header after decapsulation on the tunnel device, but HW
+ * offload matches the inner ip_ecn value before the hardware decap action.
+ *
+ * Cells marked with * are changed from the original inner packet ip_ecn value during
+ * decap, so matching those values on inner ip_ecn before decap will fail.
+ *
+ * The following helper allows offload when the inner ip_ecn won't be changed by the
+ * outer ip_ecn, except for outer ip_ecn = CE, where in all cases the inner ip_ecn
+ * will be changed to CE, and so we can drop the inner ip_ecn = CE match.
+ */
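As a worked illustration of the RFC 6040 table above (an editorial sketch, not part of the patch): in value terms, only the outer-CE column and the outer ECT(1)/inner ECT(0) cell actually change the inner codepoint, so the whole table can be written directly against the INET_ECN_* constants from include/net/inet_ecn.h. The function name rfc6040_decap_ecn() is hypothetical:

#include <net/inet_ecn.h>

/* Returns the inner ECN codepoint after decap per the table above,
 * or -1 for the drop case (outer CE, inner Not-ECT).
 */
static int rfc6040_decap_ecn(u8 outer, u8 inner)
{
	if (outer == INET_ECN_CE)
		return inner == INET_ECN_NOT_ECT ? -1 : INET_ECN_CE;
	if (outer == INET_ECN_ECT_1 && inner == INET_ECN_ECT_0)
		return INET_ECN_ECT_1;
	/* Every other combination leaves the inner codepoint unchanged. */
	return inner;
}

This is why the helper that follows only has to reject a full match on outer ECT(1) and, for outer CE, drop the (always true) inner CE match: in every other allowed combination, the pre-decap inner value the hardware sees equals the post-decap value TC matched on.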
+
+static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv, + struct flow_cls_offload *f, + bool *match_inner_ecn)
+{
+ u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_ip match;
+
+ *match_inner_ecn = true;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { + flow_rule_match_enc_ip(rule, &match); + outer_ecn_key = match.key->tos & INET_ECN_MASK; + outer_ecn_mask = match.mask->tos & INET_ECN_MASK; + }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + flow_rule_match_ip(rule, &match); + inner_ecn_key = match.key->tos & INET_ECN_MASK; + inner_ecn_mask = match.mask->tos & INET_ECN_MASK; + }
+
+ if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported"); + netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported\n"); + return -EOPNOTSUPP; + }
+
+ if (!outer_ecn_mask) { + if (!inner_ecn_mask) + return 0; + + NL_SET_ERR_MSG_MOD(extack, + "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported"); + netdev_warn(priv->netdev, + "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported\n"); + return -EOPNOTSUPP; + }
+
+ if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) { + NL_SET_ERR_MSG_MOD(extack, + "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported"); + netdev_warn(priv->netdev, + "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported\n"); + return -EOPNOTSUPP; + }
+
+ if (!inner_ecn_mask) + return 0;
+
+ /* Both inner and outer have full mask on ecn */
+
+ if (outer_ecn_key == INET_ECN_ECT_1) { + /* inner ecn might be changed by the DECAP action */ + + NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported"); + netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported\n"); + return -EOPNOTSUPP; + }
+
+ if (outer_ecn_key != INET_ECN_CE) + return 0;
+
+ if (inner_ecn_key != INET_ECN_CE) { + /* Can't happen in software, as packet ecn will be changed to CE after decap */ + NL_SET_ERR_MSG_MOD(extack, + "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported"); + netdev_warn(priv->netdev, + "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported\n"); + return -EOPNOTSUPP; + }
+
+ /* outer ecn = CE, inner ecn = CE: decap will set the inner ecn to CE in any case,
+ * so drop the match on the inner ecn
+ */
+ *match_inner_ecn = false;
+
+ return 0;
+}
+
 static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec,
@@ -2052,16 +2148,14 @@ static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec) outer_headers); } -static void *get_match_headers_value(u32 flags, - struct mlx5_flow_spec *spec) +void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec) { return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_criteria(spec) : @@ -2142,6 +2236,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; enum fs_flow_table_type fs_type; + bool match_inner_ecn = true; u16 addr_type = 0; u8 ip_proto = 0; u8 *match_level; @@ -2195,6 +2290,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, headers_c = get_match_inner_headers_criteria(spec); headers_v = get_match_inner_headers_value(spec); } + + err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn); + if (err) + return err; } err = mlx5e_flower_parse_meta(filter_dev, f); @@ -2418,10 +2517,12 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct flow_match_ip match; flow_rule_match_ip(rule, &match); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, - match.mask->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, - match.key->tos & 0x3); + if (match_inner_ecn) { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, + match.mask->tos & 0x3); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, + match.key->tos & 0x3); + } MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, match.mask->tos >> 2); @@ -2606,55 +2707,6 @@ static int parse_cls_flower(struct mlx5e_priv *priv, return err; } -struct pedit_headers { - struct ethhdr eth; - struct vlan_hdr vlan; - struct iphdr ip4; - struct ipv6hdr ip6; - struct tcphdr tcp; - struct udphdr udp; -}; - -struct pedit_headers_action { - struct pedit_headers vals; - struct pedit_headers masks; - u32 pedits; -}; - -static int pedit_header_offsets[] = { - [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), - [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), - [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), - [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), - [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), -}; - -#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) - -static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, - struct pedit_headers_action *hdrs, - struct netlink_ext_ack *extack) -{ - u32 *curr_pmask, *curr_pval; - - curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); - curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); - - if (*curr_pmask & mask) { /* disallow acting twice on the same location */ - NL_SET_ERR_MSG_MOD(extack, - "curr_pmask and new mask same. 
Acting twice on same location"); - goto out_err; - } - - *curr_pmask |= mask; - *curr_pval |= (val & mask); - - return 0; - -out_err: - return -EOPNOTSUPP; -} - struct mlx5_fields { u8 field; u8 field_bsize; @@ -2766,26 +2818,23 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, struct netlink_ext_ack *extack) { struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; - int i, action_size, first, last, next_z; void *headers_c, *headers_v, *action, *vals_p; u32 *s_masks_p, *a_masks_p, s_mask, a_mask; struct mlx5e_tc_mod_hdr_acts *mod_acts; - struct mlx5_fields *f; unsigned long mask, field_mask; - int err; + int i, first, last, next_z; + struct mlx5_fields *f; u8 cmd; mod_acts = &parse_attr->mod_hdr_acts; - headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec); - headers_v = get_match_headers_value(*action_flags, &parse_attr->spec); + headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec); + headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec); set_masks = &hdrs[0].masks; add_masks = &hdrs[1].masks; set_vals = &hdrs[0].vals; add_vals = &hdrs[1].vals; - action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto); - for (i = 0; i < ARRAY_SIZE(fields); i++) { bool skip; @@ -2853,18 +2902,16 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts); - if (err) { + action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts); + if (IS_ERR(action)) { NL_SET_ERR_MSG_MOD(extack, "too many pedit actions, can't offload"); mlx5_core_warn(priv->mdev, "mlx5: parsed %d pedit actions, can't do more\n", mod_acts->num_actions); - return err; + return PTR_ERR(action); } - action = mod_acts->actions + - (mod_acts->num_actions * action_size); MLX5_SET(set_action_in, action, action_type, cmd); MLX5_SET(set_action_in, action, field, f->field); @@ -2894,141 +2941,8 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, return 0; } -static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev, - int namespace) -{ - if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ - return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions); - else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */ - return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions); -} - -int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, - int namespace, - struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) -{ - int action_size, new_num_actions, max_hw_actions; - size_t new_sz, old_sz; - void *ret; - - if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions) - return 0; - - action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto); - - max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev, - namespace); - new_num_actions = min(max_hw_actions, - mod_hdr_acts->actions ? 
- mod_hdr_acts->max_actions * 2 : 1); - if (mod_hdr_acts->max_actions == new_num_actions) - return -ENOSPC; - - new_sz = action_size * new_num_actions; - old_sz = mod_hdr_acts->max_actions * action_size; - ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL); - if (!ret) - return -ENOMEM; - - memset(ret + old_sz, 0, new_sz - old_sz); - mod_hdr_acts->actions = ret; - mod_hdr_acts->max_actions = new_num_actions; - - return 0; -} - -void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) -{ - kfree(mod_hdr_acts->actions); - mod_hdr_acts->actions = NULL; - mod_hdr_acts->num_actions = 0; - mod_hdr_acts->max_actions = 0; -} - static const struct pedit_headers zero_masks = {}; -static int -parse_pedit_to_modify_hdr(struct mlx5e_priv *priv, - const struct flow_action_entry *act, int namespace, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - struct netlink_ext_ack *extack) -{ - u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1; - int err = -EOPNOTSUPP; - u32 mask, val, offset; - u8 htype; - - htype = act->mangle.htype; - err = -EOPNOTSUPP; /* can't be all optimistic */ - - if (htype == FLOW_ACT_MANGLE_UNSPEC) { - NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded"); - goto out_err; - } - - if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) { - NL_SET_ERR_MSG_MOD(extack, - "The pedit offload action is not supported"); - goto out_err; - } - - mask = act->mangle.mask; - val = act->mangle.val; - offset = act->mangle.offset; - - err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack); - if (err) - goto out_err; - - hdrs[cmd].pedits++; - - return 0; -out_err: - return err; -} - -static int -parse_pedit_to_reformat(const struct flow_action_entry *act, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct netlink_ext_ack *extack) -{ - u32 mask, val, offset; - u32 *p; - - if (act->id != FLOW_ACTION_MANGLE) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported action id"); - return -EOPNOTSUPP; - } - - if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) { - NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported"); - return -EOPNOTSUPP; - } - - mask = ~act->mangle.mask; - val = act->mangle.val; - offset = act->mangle.offset; - p = (u32 *)&parse_attr->eth; - *(p + (offset >> 2)) |= (val & mask); - - return 0; -} - -static int parse_tc_pedit_action(struct mlx5e_priv *priv, - const struct flow_action_entry *act, int namespace, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) -{ - if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) - return parse_pedit_to_reformat(act, parse_attr, extack); - - return parse_pedit_to_modify_hdr(priv, act, namespace, - parse_attr, hdrs, extack); -} - static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr, struct pedit_headers_action *hdrs, @@ -3060,39 +2974,10 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, return 0; out_dealloc_parsed_actions: - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); return err; } -static bool csum_offload_supported(struct mlx5e_priv *priv, - u32 action, - u32 update_flags, - struct netlink_ext_ack *extack) -{ - u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | - TCA_CSUM_UPDATE_FLAG_UDP; - - /* The HW recalcs checksums only if re-writing headers */ - if (!(action & 
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { - NL_SET_ERR_MSG_MOD(extack, - "TC csum action is only offloaded with pedit"); - netdev_warn(priv->netdev, - "TC csum action is only offloaded with pedit\n"); - return false; - } - - if (update_flags & ~prot_flags) { - NL_SET_ERR_MSG_MOD(extack, - "can't offload TC csum action for some header/s"); - netdev_warn(priv->netdev, - "can't offload TC csum action for some header/s - flags %#x\n", - update_flags); - return false; - } - - return true; -} - struct ip_ttl_word { __u8 ttl; __u8 protocol; @@ -3215,8 +3100,8 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, u8 ip_proto; int i; - headers_c = get_match_headers_criteria(actions, spec); - headers_v = get_match_headers_value(actions, spec); + headers_c = mlx5e_get_match_headers_criteria(actions, spec); + headers_v = mlx5e_get_match_headers_value(actions, spec); ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); /* for non-IP we only re-write MACs, so we're okay */ @@ -3323,7 +3208,7 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv return priv->mdev == peer_priv->mdev; } -static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) +bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *fmdev, *pmdev; u64 fsystem_guid, psystem_guid; @@ -3337,126 +3222,45 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) return (fsystem_guid == psystem_guid); } -static bool same_vf_reps(struct mlx5e_priv *priv, - struct net_device *out_dev) -{ - return mlx5e_eswitch_vf_rep(priv->netdev) && - priv->netdev == out_dev; -} - -static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, - const struct flow_action_entry *act, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - u32 *action, struct netlink_ext_ack *extack) -{ - u16 mask16 = VLAN_VID_MASK; - u16 val16 = act->vlan.vid & VLAN_VID_MASK; - const struct flow_action_entry pedit_act = { - .id = FLOW_ACTION_MANGLE, - .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH, - .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI), - .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16), - .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16), - }; - u8 match_prio_mask, match_prio_val; - void *headers_c, *headers_v; - int err; - - headers_c = get_match_headers_criteria(*action, &parse_attr->spec); - headers_v = get_match_headers_value(*action, &parse_attr->spec); - - if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) && - MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) { - NL_SET_ERR_MSG_MOD(extack, - "VLAN rewrite action must have VLAN protocol match"); - return -EOPNOTSUPP; - } - - match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio); - match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio); - if (act->vlan.prio != (match_prio_val & match_prio_mask)) { - NL_SET_ERR_MSG_MOD(extack, - "Changing VLAN prio is not supported"); - return -EOPNOTSUPP; - } - - err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack); - *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - - return err; -} - static int -add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, - struct pedit_headers_action *hdrs, - u32 *action, struct netlink_ext_ack *extack) -{ - const struct flow_action_entry prio_tag_act = { - .vlan.vid = 0, - .vlan.prio = - 
MLX5_GET(fte_match_set_lyr_2_4, - get_match_headers_value(*action, - &parse_attr->spec), - first_prio) & - MLX5_GET(fte_match_set_lyr_2_4, - get_match_headers_criteria(*action, - &parse_attr->spec), - first_prio), - }; - - return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, - &prio_tag_act, parse_attr, hdrs, action, - extack); -} - -static int validate_goto_chain(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - const struct flow_action_entry *act, - u32 actions, - struct netlink_ext_ack *extack) +parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, + struct flow_action *flow_action) { - bool is_esw = mlx5e_is_eswitch_flow(flow); + struct netlink_ext_ack *extack = parse_state->extack; + struct mlx5e_tc_flow *flow = parse_state->flow; struct mlx5_flow_attr *attr = flow->attr; - bool ft_flow = mlx5e_is_ft_flow(flow); - u32 dest_chain = act->chain_index; - struct mlx5_fs_chains *chains; - struct mlx5_eswitch *esw; - u32 reformat_and_fwd; - u32 max_chain; + enum mlx5_flow_namespace_type ns_type; + struct mlx5e_priv *priv = flow->priv; + const struct flow_action_entry *act; + struct mlx5e_tc_act *tc_act; + int err, i; - esw = priv->mdev->priv.eswitch; - chains = is_esw ? esw_chains(esw) : nic_chains(priv); - max_chain = mlx5_chains_get_chain_range(chains); - reformat_and_fwd = is_esw ? - MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) : - MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table); - - if (ft_flow) { - NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); - return -EOPNOTSUPP; - } + ns_type = mlx5e_get_flow_namespace(flow); - if (!mlx5_chains_backwards_supported(chains) && - dest_chain <= attr->chain) { - NL_SET_ERR_MSG_MOD(extack, - "Goto lower numbered chain isn't supported"); - return -EOPNOTSUPP; - } + flow_action_for_each(i, act, flow_action) { + tc_act = mlx5e_tc_act_get(act->id, ns_type); + if (!tc_act) { + NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action"); + return -EOPNOTSUPP; + } - if (dest_chain > max_chain) { - NL_SET_ERR_MSG_MOD(extack, - "Requested destination chain is out of supported range"); - return -EOPNOTSUPP; + if (!tc_act->can_offload(parse_state, act, i)) + return -EOPNOTSUPP; + + err = tc_act->parse_action(parse_state, act, priv, attr); + if (err) + return err; } - if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | - MLX5_FLOW_CONTEXT_ACTION_DECAP) && - !reformat_and_fwd) { - NL_SET_ERR_MSG_MOD(extack, - "Goto chain is not allowed if action has reformat or decap"); - return -EOPNOTSUPP; + flow_action_for_each(i, act, flow_action) { + tc_act = mlx5e_tc_act_get(act->id, ns_type); + if (!tc_act || !tc_act->post_parse || + !tc_act->can_offload(parse_state, act, i)) + continue; + + err = tc_act->post_parse(parse_state, priv, attr); + if (err) + return err; } return 0; @@ -3477,19 +3281,19 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) return 0; - ns_type = get_flow_name_space(flow); + ns_type = mlx5e_get_flow_namespace(flow); err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs, &attr->action, extack); if (err) return err; - /* In case all pedit actions are skipped, remove the MOD_HDR flag. */ if (parse_attr->mod_hdr_acts.num_actions > 0) return 0; + /* In case all pedit actions are skipped, remove the MOD_HDR flag. 
*/ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); if (ns_type != MLX5_FLOW_NAMESPACE_FDB) return 0; @@ -3502,19 +3306,9 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv, } static int -parse_tc_nic_actions(struct mlx5e_priv *priv, - struct flow_action *flow_action, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) +flow_action_supported(struct flow_action *flow_action, + struct netlink_ext_ack *extack) { - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5_flow_attr *attr = flow->attr; - struct pedit_headers_action hdrs[2] = {}; - const struct flow_action_entry *act; - struct mlx5_nic_flow_attr *nic_attr; - u32 action = 0; - int err, i; - if (!flow_action_has_entries(flow_action)) { NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); return -EINVAL; @@ -3526,106 +3320,35 @@ parse_tc_nic_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - nic_attr = attr->nic_attr; - nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - parse_attr = attr->parse_attr; - - flow_action_for_each(i, act, flow_action) { - switch (act->id) { - case FLOW_ACTION_ACCEPT: - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - break; - case FLOW_ACTION_DROP: - action |= MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - break; - case FLOW_ACTION_MANGLE: - case FLOW_ACTION_ADD: - err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr, hdrs, NULL, extack); - if (err) - return err; - - action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - break; - case FLOW_ACTION_VLAN_MANGLE: - err = add_vlan_rewrite_action(priv, - MLX5_FLOW_NAMESPACE_KERNEL, - act, parse_attr, hdrs, - &action, extack); - if (err) - return err; - - break; - case FLOW_ACTION_CSUM: - if (csum_offload_supported(priv, action, - act->csum_flags, - extack)) - break; - - return -EOPNOTSUPP; - case FLOW_ACTION_REDIRECT: { - struct net_device *peer_dev = act->dev; - - if (priv->netdev->netdev_ops == peer_dev->netdev_ops && - same_hw_devs(priv, netdev_priv(peer_dev))) { - parse_attr->mirred_ifindex[0] = peer_dev->ifindex; - flow_flag_set(flow, HAIRPIN); - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - } else { - NL_SET_ERR_MSG_MOD(extack, - "device is not on same HW, can't offload"); - netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", - peer_dev->name); - return -EOPNOTSUPP; - } - } - break; - case FLOW_ACTION_MARK: { - u32 mark = act->mark; - - if (mark & ~MLX5E_TC_FLOW_ID_MASK) { - NL_SET_ERR_MSG_MOD(extack, - "Bad flow mark - only 16 bit is supported"); - return -EOPNOTSUPP; - } - - nic_attr->flow_tag = mark; - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - } - break; - case FLOW_ACTION_GOTO: - err = validate_goto_chain(priv, flow, act, action, - extack); - if (err) - return err; + return 0; +} - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - attr->dest_chain = act->chain_index; - break; - case FLOW_ACTION_CT: - err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack); - if (err) - return err; +static int +parse_tc_nic_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + struct mlx5e_tc_act_parse_state *parse_state; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct pedit_headers_action *hdrs; + int err; - 
flow_flag_set(flow, CT); - break; - default: - NL_SET_ERR_MSG_MOD(extack, - "The offload action is not supported in NIC action"); - return -EOPNOTSUPP; - } - } + err = flow_action_supported(flow_action, extack); + if (err) + return err; - attr->action = action; + attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + parse_attr = attr->parse_attr; + parse_state = &parse_attr->parse_state; + mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); + parse_state->ct_priv = get_ct_priv(priv); + hdrs = parse_state->hdrs; - if (attr->dest_chain && parse_attr->mirred_ifindex[0]) { - NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); - return -EOPNOTSUPP; - } + err = parse_tc_actions(parse_state, flow_action); + if (err) + return err; err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); if (err) @@ -3647,147 +3370,7 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && mlx5e_eswitch_vf_rep(priv->netdev) && mlx5e_eswitch_vf_rep(peer_netdev) && - same_hw_devs(priv, peer_priv)); -} - -static int parse_tc_vlan_action(struct mlx5e_priv *priv, - const struct flow_action_entry *act, - struct mlx5_esw_flow_attr *attr, - u32 *action, - struct netlink_ext_ack *extack) -{ - u8 vlan_idx = attr->total_vlan; - - if (vlan_idx >= MLX5_FS_VLAN_DEPTH) { - NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported"); - return -EOPNOTSUPP; - } - - switch (act->id) { - case FLOW_ACTION_VLAN_POP: - if (vlan_idx) { - if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, - MLX5_FS_VLAN_DEPTH)) { - NL_SET_ERR_MSG_MOD(extack, - "vlan pop action is not supported"); - return -EOPNOTSUPP; - } - - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; - } else { - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - } - break; - case FLOW_ACTION_VLAN_PUSH: - attr->vlan_vid[vlan_idx] = act->vlan.vid; - attr->vlan_prio[vlan_idx] = act->vlan.prio; - attr->vlan_proto[vlan_idx] = act->vlan.proto; - if (!attr->vlan_proto[vlan_idx]) - attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); - - if (vlan_idx) { - if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, - MLX5_FS_VLAN_DEPTH)) { - NL_SET_ERR_MSG_MOD(extack, - "vlan push action is not supported for vlan depth > 1"); - return -EOPNOTSUPP; - } - - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; - } else { - if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && - (act->vlan.proto != htons(ETH_P_8021Q) || - act->vlan.prio)) { - NL_SET_ERR_MSG_MOD(extack, - "vlan push action is not supported"); - return -EOPNOTSUPP; - } - - *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - } - break; - default: - NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN"); - return -EINVAL; - } - - attr->total_vlan = vlan_idx + 1; - - return 0; -} - -static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev, - struct net_device *out_dev) -{ - struct net_device *fdb_out_dev = out_dev; - struct net_device *uplink_upper; - - rcu_read_lock(); - uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev); - if (uplink_upper && netif_is_lag_master(uplink_upper) && - uplink_upper == out_dev) { - fdb_out_dev = uplink_dev; - } else if (netif_is_lag_master(out_dev)) { - fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev)); - if (fdb_out_dev && - (!mlx5e_eswitch_rep(fdb_out_dev) || - !netdev_port_same_parent_id(fdb_out_dev, uplink_dev))) - fdb_out_dev = NULL; - } - rcu_read_unlock(); - return fdb_out_dev; -} - -static int add_vlan_push_action(struct mlx5e_priv *priv, - 
struct mlx5_flow_attr *attr, - struct net_device **out_dev, - u32 *action, - struct netlink_ext_ack *extack) -{ - struct net_device *vlan_dev = *out_dev; - struct flow_action_entry vlan_act = { - .id = FLOW_ACTION_VLAN_PUSH, - .vlan.vid = vlan_dev_vlan_id(vlan_dev), - .vlan.proto = vlan_dev_vlan_proto(vlan_dev), - .vlan.prio = 0, - }; - int err; - - err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack); - if (err) - return err; - - rcu_read_lock(); - *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev)); - rcu_read_unlock(); - if (!*out_dev) - return -ENODEV; - - if (is_vlan_dev(*out_dev)) - err = add_vlan_push_action(priv, attr, out_dev, action, extack); - - return err; -} - -static int add_vlan_pop_action(struct mlx5e_priv *priv, - struct mlx5_flow_attr *attr, - u32 *action, - struct netlink_ext_ack *extack) -{ - struct flow_action_entry vlan_act = { - .id = FLOW_ACTION_VLAN_POP, - }; - int nest_level, err = 0; - - nest_level = attr->parse_attr->filter_dev->lower_level - - priv->netdev->lower_level; - while (nest_level--) { - err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack); - if (err) - return err; - } - - return err; + mlx5e_same_hw_devs(priv, peer_priv)); } static bool same_hw_reps(struct mlx5e_priv *priv, @@ -3799,7 +3382,7 @@ static bool same_hw_reps(struct mlx5e_priv *priv, return mlx5e_eswitch_rep(priv->netdev) && mlx5e_eswitch_rep(peer_netdev) && - same_hw_devs(priv, peer_priv); + mlx5e_same_hw_devs(priv, peer_priv); } static bool is_lag_dev(struct mlx5e_priv *priv, @@ -3823,66 +3406,6 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, same_port_devs(priv, netdev_priv(out_dev)); } -static bool is_duplicated_output_device(struct net_device *dev, - struct net_device *out_dev, - int *ifindexes, int if_count, - struct netlink_ext_ack *extack) -{ - int i; - - for (i = 0; i < if_count; i++) { - if (ifindexes[i] == out_dev->ifindex) { - NL_SET_ERR_MSG_MOD(extack, - "can't duplicate output to same device"); - netdev_err(dev, "can't duplicate output to same device: %s\n", - out_dev->name); - return true; - } - } - - return false; -} - -static int verify_uplink_forwarding(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct net_device *out_dev, - struct netlink_ext_ack *extack) -{ - struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *rep_priv; - - /* Forwarding non encapsulated traffic between - * uplink ports is allowed only if - * termination_table_raw_traffic cap is set. - * - * Input vport was stored attr->in_rep. - * In LAG case, *priv* is the private data of - * uplink which may be not the input vport. 
- */ - rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep); - - if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) && - mlx5e_eswitch_uplink_rep(out_dev))) - return 0; - - if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, - termination_table_raw_traffic)) { - NL_SET_ERR_MSG_MOD(extack, - "devices are both uplink, can't offload forwarding"); - pr_err("devices %s %s are both uplink, can't offload forwarding\n", - priv->netdev->name, out_dev->name); - return -EOPNOTSUPP; - } else if (out_dev != rep_priv->netdev) { - NL_SET_ERR_MSG_MOD(extack, - "devices are not the same uplink, can't offload forwarding"); - pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n", - priv->netdev->name, out_dev->name); - return -EOPNOTSUPP; - } - return 0; -} - int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, int ifindex, @@ -3922,384 +3445,33 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, return 0; } -static int parse_tc_fdb_actions(struct mlx5e_priv *priv, - struct flow_action *flow_action, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) +static int +parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { - struct pedit_headers_action hdrs[2] = {}; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_act_parse_state *parse_state; struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5e_sample_attr sample_attr = {}; - const struct ip_tunnel_info *info = NULL; struct mlx5_flow_attr *attr = flow->attr; - int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; - bool ft_flow = mlx5e_is_ft_flow(flow); - const struct flow_action_entry *act; struct mlx5_esw_flow_attr *esw_attr; - bool encap = false, decap = false; - u32 action = attr->action; - int err, i, if_count = 0; - bool ptype_host = false; - bool mpls_push = false; - - if (!flow_action_has_entries(flow_action)) { - NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries"); - return -EINVAL; - } + struct pedit_headers_action *hdrs; + int err; - if (!flow_action_hw_stats_check(flow_action, extack, - FLOW_ACTION_HW_STATS_DELAYED_BIT)) { - NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported"); - return -EOPNOTSUPP; - } + err = flow_action_supported(flow_action, extack); + if (err) + return err; esw_attr = attr->esw_attr; parse_attr = attr->parse_attr; + parse_state = &parse_attr->parse_state; + mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); + parse_state->ct_priv = get_ct_priv(priv); + hdrs = parse_state->hdrs; - flow_action_for_each(i, act, flow_action) { - switch (act->id) { - case FLOW_ACTION_ACCEPT: - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT; - break; - case FLOW_ACTION_PTYPE: - if (act->ptype != PACKET_HOST) { - NL_SET_ERR_MSG_MOD(extack, - "skbedit ptype is only supported with type host"); - return -EOPNOTSUPP; - } - - ptype_host = true; - break; - case FLOW_ACTION_DROP: - action |= MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - break; - case FLOW_ACTION_TRAP: - if (!flow_offload_has_one_action(flow_action)) { - NL_SET_ERR_MSG_MOD(extack, - "action trap is supported as a sole action only"); - return -EOPNOTSUPP; - } - action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT); - attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; - break; - case 
FLOW_ACTION_MPLS_PUSH: - if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, - reformat_l2_to_l3_tunnel) || - act->mpls_push.proto != htons(ETH_P_MPLS_UC)) { - NL_SET_ERR_MSG_MOD(extack, - "mpls push is supported only for mpls_uc protocol"); - return -EOPNOTSUPP; - } - mpls_push = true; - break; - case FLOW_ACTION_MPLS_POP: - /* we only support mpls pop if it is the first action - * and the filter net device is bareudp. Subsequent - * actions can be pedit and the last can be mirred - * egress redirect. - */ - if (i) { - NL_SET_ERR_MSG_MOD(extack, - "mpls pop supported only as first action"); - return -EOPNOTSUPP; - } - if (!netif_is_bareudp(parse_attr->filter_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "mpls pop supported only on bareudp devices"); - return -EOPNOTSUPP; - } - - parse_attr->eth.h_proto = act->mpls_pop.proto; - action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - flow_flag_set(flow, L3_TO_L2_DECAP); - break; - case FLOW_ACTION_MANGLE: - case FLOW_ACTION_ADD: - err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB, - parse_attr, hdrs, flow, extack); - if (err) - return err; - - if (!flow_flag_test(flow, L3_TO_L2_DECAP)) { - action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - esw_attr->split_count = esw_attr->out_count; - } - break; - case FLOW_ACTION_CSUM: - if (csum_offload_supported(priv, action, - act->csum_flags, extack)) - break; - - return -EOPNOTSUPP; - case FLOW_ACTION_REDIRECT_INGRESS: { - struct net_device *out_dev; - - out_dev = act->dev; - if (!out_dev) - return -EOPNOTSUPP; - - if (!netif_is_ovs_master(out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to ingress is supported only for OVS internal ports"); - return -EOPNOTSUPP; - } - - if (netif_is_ovs_master(parse_attr->filter_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to ingress is not supported from internal port"); - return -EOPNOTSUPP; - } - - if (!ptype_host) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to int port ingress requires ptype=host action"); - return -EOPNOTSUPP; - } - - if (esw_attr->out_count) { - NL_SET_ERR_MSG_MOD(extack, - "redirect to int port ingress is supported only as single destination"); - return -EOPNOTSUPP; - } - - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - - err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex, - MLX5E_TC_INT_PORT_INGRESS, - &action, esw_attr->out_count); - if (err) - return err; - - esw_attr->out_count++; - - break; - } - case FLOW_ACTION_REDIRECT: - case FLOW_ACTION_MIRRED: { - struct mlx5e_priv *out_priv; - struct net_device *out_dev; - - out_dev = act->dev; - if (!out_dev) { - /* out_dev is NULL when filters with - * non-existing mirred device are replayed to - * the driver. - */ - return -EINVAL; - } - - if (mpls_push && !netif_is_bareudp(out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "mpls is supported only through a bareudp device"); - return -EOPNOTSUPP; - } - - if (ft_flow && out_dev == priv->netdev) { - /* Ignore forward to self rules generated - * by adding both mlx5 devs to the flow table - * block on a normal nft offload setup. 
- */ - return -EOPNOTSUPP; - } - - if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { - NL_SET_ERR_MSG_MOD(extack, - "can't support more output ports, can't offload forwarding"); - netdev_warn(priv->netdev, - "can't support more than %d output ports, can't offload forwarding\n", - esw_attr->out_count); - return -EOPNOTSUPP; - } - - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - if (encap) { - parse_attr->mirred_ifindex[esw_attr->out_count] = - out_dev->ifindex; - parse_attr->tun_info[esw_attr->out_count] = - mlx5e_dup_tun_info(info); - if (!parse_attr->tun_info[esw_attr->out_count]) - return -ENOMEM; - encap = false; - esw_attr->dests[esw_attr->out_count].flags |= - MLX5_ESW_DEST_ENCAP; - esw_attr->out_count++; - /* attr->dests[].rep is resolved when we - * handle encap - */ - } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - - if (is_duplicated_output_device(priv->netdev, - out_dev, - ifindexes, - if_count, - extack)) - return -EOPNOTSUPP; - - ifindexes[if_count] = out_dev->ifindex; - if_count++; - - out_dev = get_fdb_out_dev(uplink_dev, out_dev); - if (!out_dev) - return -ENODEV; - - if (is_vlan_dev(out_dev)) { - err = add_vlan_push_action(priv, attr, - &out_dev, - &action, extack); - if (err) - return err; - } - - if (is_vlan_dev(parse_attr->filter_dev)) { - err = add_vlan_pop_action(priv, attr, - &action, extack); - if (err) - return err; - } - - if (netif_is_macvlan(out_dev)) - out_dev = macvlan_dev_real_dev(out_dev); - - err = verify_uplink_forwarding(priv, flow, out_dev, extack); - if (err) - return err; - - if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "devices are not on same switch HW, can't offload forwarding"); - return -EOPNOTSUPP; - } - - if (same_vf_reps(priv, out_dev)) { - NL_SET_ERR_MSG_MOD(extack, - "can't forward from a VF to itself"); - return -EOPNOTSUPP; - } - - out_priv = netdev_priv(out_dev); - rpriv = out_priv->ppriv; - esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; - esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; - esw_attr->out_count++; - } else if (netif_is_ovs_master(out_dev)) { - err = mlx5e_set_fwd_to_int_port_actions(priv, attr, - out_dev->ifindex, - MLX5E_TC_INT_PORT_EGRESS, - &action, - esw_attr->out_count); - if (err) - return err; - - esw_attr->out_count++; - } else if (parse_attr->filter_dev != priv->netdev) { - /* All mlx5 devices are called to configure - * high level device filters. 
Therefore, the - * *attempt* to install a filter on invalid - * eswitch should not trigger an explicit error - */ - return -EINVAL; - } else { - NL_SET_ERR_MSG_MOD(extack, - "devices are not on same switch HW, can't offload forwarding"); - netdev_warn(priv->netdev, - "devices %s %s not on same switch HW, can't offload forwarding\n", - priv->netdev->name, - out_dev->name); - return -EOPNOTSUPP; - } - } - break; - case FLOW_ACTION_TUNNEL_ENCAP: - info = act->tunnel; - if (info) { - encap = true; - } else { - NL_SET_ERR_MSG_MOD(extack, - "Zero tunnel attributes is not supported"); - return -EOPNOTSUPP; - } - - break; - case FLOW_ACTION_VLAN_PUSH: - case FLOW_ACTION_VLAN_POP: - if (act->id == FLOW_ACTION_VLAN_PUSH && - (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) { - /* Replace vlan pop+push with vlan modify */ - action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - err = add_vlan_rewrite_action(priv, - MLX5_FLOW_NAMESPACE_FDB, - act, parse_attr, hdrs, - &action, extack); - } else { - err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack); - } - if (err) - return err; - - esw_attr->split_count = esw_attr->out_count; - break; - case FLOW_ACTION_VLAN_MANGLE: - err = add_vlan_rewrite_action(priv, - MLX5_FLOW_NAMESPACE_FDB, - act, parse_attr, hdrs, - &action, extack); - if (err) - return err; - - esw_attr->split_count = esw_attr->out_count; - break; - case FLOW_ACTION_TUNNEL_DECAP: - decap = true; - break; - case FLOW_ACTION_GOTO: - err = validate_goto_chain(priv, flow, act, action, - extack); - if (err) - return err; - - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - attr->dest_chain = act->chain_index; - break; - case FLOW_ACTION_CT: - if (flow_flag_test(flow, SAMPLE)) { - NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); - return -EOPNOTSUPP; - } - err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack); - if (err) - return err; - - flow_flag_set(flow, CT); - esw_attr->split_count = esw_attr->out_count; - break; - case FLOW_ACTION_SAMPLE: - if (flow_flag_test(flow, CT)) { - NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported"); - return -EOPNOTSUPP; - } - sample_attr.rate = act->sample.rate; - sample_attr.group_num = act->sample.psample_group->group_num; - if (act->sample.truncate) - sample_attr.trunc_size = act->sample.trunc_size; - flow_flag_set(flow, SAMPLE); - break; - default: - NL_SET_ERR_MSG_MOD(extack, - "The offload action is not supported in FDB action"); - return -EOPNOTSUPP; - } - } + err = parse_tc_actions(parse_state, flow_action); + if (err) + return err; /* Forward to/from internal port can only have 1 dest */ if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) && @@ -4309,23 +3481,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - /* always set IP version for indirect table handling */ - attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); - - if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && - action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) { - /* For prio tag mode, replace vlan pop with rewrite vlan prio - * tag rewrite. 
- */ - action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs, - &action, extack); - if (err) - return err; - } - - attr->action = action; - err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack); if (err) return err; @@ -4333,30 +3488,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; - if (attr->dest_chain && decap) { - /* It can be supported if we'll create a mapping for - * the tunnel device only (without tunnel), and set - * this tunnel id with this decap flow. - * - * On restore (miss), we'll just set this saved tunnel - * device. - */ - - NL_SET_ERR_MSG(extack, "Decap with goto isn't supported"); - netdev_warn(priv->netdev, "Decap with goto isn't supported"); - return -EOPNOTSUPP; - } - - /* Allocate sample attribute only when there is a sample action and - * no errors after parsing. - */ - if (flow_flag_test(flow, SAMPLE)) { - attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL); - if (!attr->sample_attr) - return -ENOMEM; - *attr->sample_attr = sample_attr; - } - return 0; } @@ -4453,7 +3584,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, flow->cookie = f->cookie; flow->priv = priv; - attr = mlx5_alloc_flow_attr(get_flow_name_space(flow)); + attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow)); if (!attr) goto err_free; @@ -4465,6 +3596,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, INIT_LIST_HEAD(&flow->l3_to_l2_reformat); refcount_set(&flow->refcnt, 1); init_completion(&flow->init_done); + init_completion(&flow->del_hw_done); *__flow = flow; *__parse_attr = parse_attr; @@ -4547,6 +3679,9 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; + /* always set IP version for indirect table handling */ + flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); + err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); if (err) goto err_free; @@ -4708,7 +3843,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, err_free: flow_flag_set(flow, FAILED); - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); mlx5e_flow_put(priv, flow); out: return err; @@ -5008,14 +4143,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, struct tc_cls_matchall_offload *ma) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct netlink_ext_ack *extack = ma->common.extack; - if (!mlx5_esw_qos_enabled(esw)) { - NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device"); - return -EOPNOTSUPP; - } - if (ma->common.prio != 1) { NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); return -EINVAL; @@ -5057,7 +4186,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, u16 peer_vhca_id; int bkt; - if (!same_hw_devs(priv, peer_priv)) + if (!mlx5e_same_hw_devs(priv, peer_priv)) return; peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index fdb222793027..5ffae9b13066 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -151,7 +151,6 @@ enum { int mlx5e_tc_esw_init(struct rhashtable *tc_ht); void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); -bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow); int 
mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct flow_cls_offload *f, unsigned long flags); @@ -247,11 +246,6 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow); -int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, - int namespace, - struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); -void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); - struct mlx5e_tc_flow; u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 792e0d6aa861..48a45aa54a3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -19,6 +19,7 @@ #include "lib/clock.h" #include "diag/fw_tracer.h" #include "mlx5_irq.h" +#include "devlink.h" enum { MLX5_EQE_OWNER_INIT_VAL = 0x1, @@ -58,6 +59,8 @@ struct mlx5_eq_table { struct mutex lock; /* sync async eqs creations */ int num_comp_eqs; struct mlx5_irq_table *irq_table; + struct mlx5_irq **comp_irqs; + struct mlx5_irq *ctrl_irq; #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif @@ -265,8 +268,8 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; u8 log_eq_stride = ilog2(MLX5_EQE_SIZE); struct mlx5_priv *priv = &dev->priv; - u16 vecidx = param->irq_index; __be64 *pas; + u16 vecidx; void *eqc; int inlen; u32 *in; @@ -288,20 +291,16 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc); init_eq_buf(eq); - eq->irq = mlx5_irq_request(dev, vecidx, param->affinity); - if (IS_ERR(eq->irq)) { - err = PTR_ERR(eq->irq); - goto err_buf; - } - + eq->irq = param->irq; vecidx = mlx5_irq_get_index(eq->irq); + inlen = MLX5_ST_SZ_BYTES(create_eq_in) + MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; - goto err_irq; + goto err_buf; } pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas); @@ -345,8 +344,6 @@ err_eq: err_in: kvfree(in); -err_irq: - mlx5_irq_release(eq->irq); err_buf: mlx5_frag_buf_free(dev, &eq->frag_buf); return err; @@ -400,7 +397,6 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); - mlx5_irq_release(eq->irq); mlx5_frag_buf_free(dev, &eq->frag_buf); return err; @@ -593,11 +589,8 @@ setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq, eq->irq_nb.notifier_call = mlx5_eq_async_int; spin_lock_init(&eq->lock); - if (!zalloc_cpumask_var(¶m->affinity, GFP_KERNEL)) - return -ENOMEM; err = create_async_eq(dev, &eq->core, param); - free_cpumask_var(param->affinity); if (err) { mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err); return err; @@ -622,17 +615,38 @@ static void cleanup_async_eq(struct mlx5_core_dev *dev, name, err); } +static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(dev); + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE, + &val); + if (!err) + return val.vu32; + mlx5_core_dbg(dev, "Failed to get param. using default. 
err = %d\n", err); + return MLX5_NUM_ASYNC_EQE; +} static int create_async_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_eq_param param = {}; int err; + /* All the async_eqs are using single IRQ, request one IRQ and share its + * index among all the async_eqs of this device. + */ + table->ctrl_irq = mlx5_ctrl_irq_request(dev); + if (IS_ERR(table->ctrl_irq)) + return PTR_ERR(table->ctrl_irq); + MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR); mlx5_eq_notifier_register(dev, &table->cq_err_nb); param = (struct mlx5_eq_param) { - .irq_index = MLX5_IRQ_EQ_CTRL, + .irq = table->ctrl_irq, .nent = MLX5_NUM_CMD_EQE, .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, }; @@ -645,8 +659,8 @@ static int create_async_eqs(struct mlx5_core_dev *dev) mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); param = (struct mlx5_eq_param) { - .irq_index = MLX5_IRQ_EQ_CTRL, - .nent = MLX5_NUM_ASYNC_EQE, + .irq = table->ctrl_irq, + .nent = async_eq_depth_devlink_param_get(dev), }; gather_async_events_mask(dev, param.mask); @@ -655,7 +669,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) goto err2; param = (struct mlx5_eq_param) { - .irq_index = MLX5_IRQ_EQ_CTRL, + .irq = table->ctrl_irq, .nent = /* TODO: sriov max_vf + */ 1, .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST, }; @@ -674,6 +688,7 @@ err2: err1: mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); + mlx5_ctrl_irq_release(table->ctrl_irq); return err; } @@ -688,6 +703,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev) cleanup_async_eq(dev, &table->cmd_eq, "cmd"); mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); + mlx5_ctrl_irq_release(table->ctrl_irq); } struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev) @@ -715,12 +731,10 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL); int err; - if (!cpumask_available(param->affinity)) - return ERR_PTR(-EINVAL); - if (!eq) return ERR_PTR(-ENOMEM); + param->irq = dev->priv.eq_table->ctrl_irq; err = create_async_eq(dev, eq, param); if (err) { kvfree(eq); @@ -780,6 +794,54 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm) } EXPORT_SYMBOL(mlx5_eq_update_ci); +static void comp_irqs_release(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = dev->priv.eq_table; + + if (mlx5_core_is_sf(dev)) + mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs); + else + mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs); + kfree(table->comp_irqs); +} + +static int comp_irqs_request(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = dev->priv.eq_table; + int ncomp_eqs = table->num_comp_eqs; + u16 *cpus; + int ret; + int i; + + ncomp_eqs = table->num_comp_eqs; + table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL); + if (!table->comp_irqs) + return -ENOMEM; + if (mlx5_core_is_sf(dev)) { + ret = mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs); + if (ret < 0) + goto free_irqs; + return ret; + } + + cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL); + if (!cpus) { + ret = -ENOMEM; + goto free_irqs; + } + for (i = 0; i < ncomp_eqs; i++) + cpus[i] = cpumask_local_spread(i, dev->priv.numa_node); + ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs); + kfree(cpus); + if (ret < 0) + goto free_irqs; + return ret; + +free_irqs: + kfree(table->comp_irqs); + 
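+	/* ret holds the negative errno from the failed allocation or IRQ
+	 * request above; create_comp_eqs() treats any negative return as
+	 * fatal.
+	 */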
return ret; +} + static void destroy_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; @@ -794,6 +856,22 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev) tasklet_disable(&eq->tasklet_ctx.task); kfree(eq); } + comp_irqs_release(dev); +} + +static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(dev); + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE, + &val); + if (!err) + return val.vu32; + mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err); + return MLX5_COMP_EQ_SIZE; } static int create_comp_eqs(struct mlx5_core_dev *dev) @@ -805,12 +883,13 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) int err; int i; + ncomp_eqs = comp_irqs_request(dev); + if (ncomp_eqs < 0) + return ncomp_eqs; INIT_LIST_HEAD(&table->comp_eqs_list); - ncomp_eqs = table->num_comp_eqs; - nent = MLX5_COMP_EQ_SIZE; + nent = comp_eq_depth_devlink_param_get(dev); for (i = 0; i < ncomp_eqs; i++) { struct mlx5_eq_param param = {}; - int vecidx = i; eq = kzalloc(sizeof(*eq), GFP_KERNEL); if (!eq) { @@ -825,18 +904,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) eq->irq_nb.notifier_call = mlx5_eq_comp_int; param = (struct mlx5_eq_param) { - .irq_index = vecidx, + .irq = table->comp_irqs[i], .nent = nent, }; - if (!zalloc_cpumask_var(¶m.affinity, GFP_KERNEL)) { - err = -ENOMEM; - goto clean_eq; - } - cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node), - param.affinity); err = create_map_eq(dev, &eq->core, ¶m); - free_cpumask_var(param.affinity); if (err) goto clean_eq; err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); @@ -850,7 +922,9 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) list_add_tail(&eq->list, &table->comp_eqs_list); } + table->num_comp_eqs = ncomp_eqs; return 0; + clean_eq: kfree(eq); clean: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c index 425c91814b34..c275fe028b6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c @@ -14,6 +14,7 @@ #include "fs_core.h" #include "esw/indir_table.h" #include "lib/fs_chains.h" +#include "en/mod_hdr.h" #define MLX5_ESW_INDIR_TABLE_SIZE 128 #define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2) @@ -226,7 +227,7 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw, goto err_handle; } - dealloc_mod_hdr_actions(&mod_acts); + mlx5e_mod_hdr_dealloc(&mod_acts); rule->handle = handle; rule->vni = esw_attr->rx_tun_attr->vni; rule->mh = flow_act.modify_hdr; @@ -243,7 +244,7 @@ err_table: mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr); err_mod_hdr_alloc: err_mod_hdr_regc1: - dealloc_mod_hdr_actions(&mod_acts); + mlx5e_mod_hdr_dealloc(&mod_acts); err_mod_hdr_regc0: err_ethertype: kfree(rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index df277a6cddc0..9d17206d1625 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -431,7 +431,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int err = 0; if (!mlx5_esw_allowed(esw)) - return -EPERM; + return vlan ? 
-EPERM : 0; if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; @@ -522,9 +522,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, return PTR_ERR(evport); mutex_lock(&esw->state_lock); - err = mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL); - if (!err) - err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL); + err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate); mutex_unlock(&esw->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index c6cc67cb4f6a..11bbcd5f5b8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, /* If vports min rate divider is 0 but their group has bw_share configured, then * need to set bw_share for vports to minimal value. */ - if (!group_level && !max_guarantee && group->bw_share) + if (!group_level && !max_guarantee && group && group->bw_share) return 1; return 0; } @@ -204,10 +204,8 @@ static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divid return 0; } -int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, - struct mlx5_vport *evport, - u32 min_rate, - struct netlink_ext_ack *extack) +static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport, + u32 min_rate, struct netlink_ext_ack *extack) { u32 fw_max_bw_share, previous_min_rate; bool min_rate_supported; @@ -231,10 +229,8 @@ int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, return err; } -int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, - struct mlx5_vport *evport, - u32 max_rate, - struct netlink_ext_ack *extack) +static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport, + u32 max_rate, struct netlink_ext_ack *extack) { u32 act_max_rate = max_rate; bool max_rate_supported; @@ -423,7 +419,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw, return err; /* Recalculate bw share weights of old and new groups */ - if (vport->qos.bw_share) { + if (vport->qos.bw_share || new_group->bw_share) { esw_qos_normalize_vports_min_rate(esw, curr_group, extack); esw_qos_normalize_vports_min_rate(esw, new_group, extack); } @@ -432,16 +428,13 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw, } static struct mlx5_esw_rate_group * -esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) +__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_esw_rate_group *group; u32 divider; int err; - if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth)) - return ERR_PTR(-EOPNOTSUPP); - group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) return ERR_PTR(-ENOMEM); @@ -482,9 +475,32 @@ err_sched_elem: return ERR_PTR(err); } -static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, - struct mlx5_esw_rate_group *group, - struct netlink_ext_ack *extack) +static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack); +static void esw_qos_put(struct mlx5_eswitch *esw); + +static struct mlx5_esw_rate_group * +esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *group; + int err; + + if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth)) + return ERR_PTR(-EOPNOTSUPP); 
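+	/* Take a reference on the eswitch QoS state before creating the
+	 * group; the first user also creates the root TSAR and group0
+	 * (see esw_qos_create()), and the reference is dropped again
+	 * below if group creation fails.
+	 */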
+ + err = esw_qos_get(esw, extack); + if (err) + return ERR_PTR(err); + + group = __esw_qos_create_rate_group(esw, extack); + if (IS_ERR(group)) + esw_qos_put(esw); + + return group; +} + +static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) { u32 divider; int err; @@ -503,7 +519,21 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix); + kfree(group); + + return err; +} + +static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + int err; + + err = __esw_qos_destroy_rate_group(esw, group, extack); + esw_qos_put(esw); + return err; } @@ -526,7 +556,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) return false; } -void mlx5_esw_qos_create(struct mlx5_eswitch *esw) +static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_core_dev *dev = esw->dev; @@ -534,14 +564,10 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw) int err; if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return; + return -EOPNOTSUPP; if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) - return; - - mutex_lock(&esw->state_lock); - if (esw->qos.enabled) - goto unlock; + return -EOPNOTSUPP; MLX5_SET(scheduling_context, tsar_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); @@ -555,75 +581,94 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw) &esw->qos.root_tsar_ix); if (err) { esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err); - goto unlock; + return err; } INIT_LIST_HEAD(&esw->qos.groups); if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) { - esw->qos.group0 = esw_qos_create_rate_group(esw, NULL); + esw->qos.group0 = __esw_qos_create_rate_group(esw, extack); if (IS_ERR(esw->qos.group0)) { esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n", PTR_ERR(esw->qos.group0)); + err = PTR_ERR(esw->qos.group0); goto err_group0; } } - esw->qos.enabled = true; -unlock: - mutex_unlock(&esw->state_lock); - return; + refcount_set(&esw->qos.refcnt, 1); + + return 0; err_group0: - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - esw->qos.root_tsar_ix); - if (err) - esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err); - mutex_unlock(&esw->state_lock); + if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, + esw->qos.root_tsar_ix)) + esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n"); + + return err; } -void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw) +static void esw_qos_destroy(struct mlx5_eswitch *esw) { - struct devlink *devlink = priv_to_devlink(esw->dev); int err; - devlink_rate_nodes_destroy(devlink); - mutex_lock(&esw->state_lock); - if (!esw->qos.enabled) - goto unlock; - if (esw->qos.group0) - esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL); + __esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL); err = mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH, esw->qos.root_tsar_ix); if (err) esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err); +} - esw->qos.enabled = false; -unlock: - mutex_unlock(&esw->state_lock); +static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack 
*extack) +{ + int err = 0; + + lockdep_assert_held(&esw->state_lock); + + if (!refcount_inc_not_zero(&esw->qos.refcnt)) { + /* esw_qos_create() set refcount to 1 only on success. + * No need to decrement on failure. + */ + err = esw_qos_create(esw, extack); + } + + return err; } -int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - u32 max_rate, u32 bw_share) +static void esw_qos_put(struct mlx5_eswitch *esw) +{ + lockdep_assert_held(&esw->state_lock); + if (refcount_dec_and_test(&esw->qos.refcnt)) + esw_qos_destroy(esw); +} + +static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) { int err; lockdep_assert_held(&esw->state_lock); - if (!esw->qos.enabled) + if (vport->qos.enabled) return 0; - if (vport->qos.enabled) - return -EEXIST; + err = esw_qos_get(esw, extack); + if (err) + return err; vport->qos.group = esw->qos.group0; err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share); - if (!err) { - vport->qos.enabled = true; - trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate); - } + if (err) + goto err_out; + + vport->qos.enabled = true; + trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate); + + return 0; + +err_out: + esw_qos_put(esw); return err; } @@ -633,7 +678,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo int err; lockdep_assert_held(&esw->state_lock); - if (!esw->qos.enabled || !vport->qos.enabled) + if (!vport->qos.enabled) return; WARN(vport->qos.group && vport->qos.group != esw->qos.group0, "Disabling QoS on port before detaching it from group"); @@ -645,8 +690,27 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", vport->vport, err); - vport->qos.enabled = false; + memset(&vport->qos, 0, sizeof(vport->qos)); trace_mlx5_esw_vport_qos_destroy(vport); + + esw_qos_put(esw); +} + +int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + u32 min_rate, u32 max_rate) +{ + int err; + + lockdep_assert_held(&esw->state_lock); + err = esw_qos_vport_enable(esw, vport, 0, 0, NULL); + if (err) + return err; + + err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL); + if (!err) + err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL); + + return err; } int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) @@ -654,22 +718,29 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; struct mlx5_vport *vport; u32 bitmask; + int err; vport = mlx5_eswitch_get_vport(esw, vport_num); if (IS_ERR(vport)) return PTR_ERR(vport); - if (!vport->qos.enabled) - return -EOPNOTSUPP; - - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); - bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + mutex_lock(&esw->state_lock); + if (!vport->qos.enabled) { + /* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. 
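+		 * The requested rate_mbps is applied as the vport max rate
+		 * when the scheduling element is created.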
*/ + err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL); + } else { + MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); + + bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + err = mlx5_modify_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + ctx, + vport->qos.esw_tsar_ix, + bitmask); + } + mutex_unlock(&esw->state_lock); - return mlx5_modify_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - ctx, - vport->qos.esw_tsar_ix, - bitmask); + return err; } #define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */ @@ -728,7 +799,12 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void return err; mutex_lock(&esw->state_lock); - err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack); + err = esw_qos_vport_enable(esw, vport, 0, 0, extack); + if (err) + goto unlock; + + err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack); +unlock: mutex_unlock(&esw->state_lock); return err; } @@ -749,7 +825,12 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void * return err; mutex_lock(&esw->state_lock); - err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack); + err = esw_qos_vport_enable(esw, vport, 0, 0, extack); + if (err) + goto unlock; + + err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack); +unlock: mutex_unlock(&esw->state_lock); return err; } @@ -846,7 +927,9 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, int err; mutex_lock(&esw->state_lock); - err = esw_qos_vport_update_group(esw, vport, group, extack); + err = esw_qos_vport_enable(esw, vport, 0, 0, extack); + if (!err) + err = esw_qos_vport_update_group(esw, vport, group, extack); mutex_unlock(&esw->state_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h index 28451abe2d2f..0141e9d52037 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -6,18 +6,8 @@ #ifdef CONFIG_MLX5_ESWITCH -int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, - struct mlx5_vport *evport, - u32 min_rate, - struct netlink_ext_ack *extack); -int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, - struct mlx5_vport *evport, - u32 max_rate, - struct netlink_ext_ack *extack); -void mlx5_esw_qos_create(struct mlx5_eswitch *esw); -void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw); -int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, - u32 max_rate, u32 bw_share); +int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport, + u32 max_rate, u32 min_rate); void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport); int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ec136b499204..458ec0bca1b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -781,9 +781,6 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) if (err) return err; - /* Attach vport to the eswitch rate limiter */ - mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share); - if (mlx5_esw_is_manager_vport(esw, vport_num)) return 0; @@ -1260,8 +1257,6 @@ int mlx5_eswitch_enable_locked(struct 
mlx5_eswitch *esw, int mode, int num_vfs) mlx5_eswitch_update_num_of_vfs(esw, num_vfs); - mlx5_esw_qos_create(esw); - esw->mode = mode; if (mode == MLX5_ESWITCH_LEGACY) { @@ -1290,7 +1285,6 @@ abort: if (mode == MLX5_ESWITCH_OFFLOADS) mlx5_rescan_drivers(esw->dev); - mlx5_esw_qos_destroy(esw); mlx5_esw_acls_ns_cleanup(esw); return err; } @@ -1305,12 +1299,17 @@ abort: */ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { + bool toggle_lag; int ret; if (!mlx5_esw_allowed(esw)) return 0; - mlx5_lag_disable_change(esw->dev); + toggle_lag = esw->mode == MLX5_ESWITCH_NONE; + + if (toggle_lag) + mlx5_lag_disable_change(esw->dev); + down_write(&esw->mode_lock); if (esw->mode == MLX5_ESWITCH_NONE) { ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); @@ -1324,12 +1323,16 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) esw->esw_funcs.num_vfs = num_vfs; } up_write(&esw->mode_lock); - mlx5_lag_enable_change(esw->dev); + + if (toggle_lag) + mlx5_lag_enable_change(esw->dev); + return ret; } void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) { + struct devlink *devlink = priv_to_devlink(esw->dev); int old_mode; lockdep_assert_held_write(&esw->mode_lock); @@ -1359,7 +1362,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) if (old_mode == MLX5_ESWITCH_OFFLOADS) mlx5_rescan_drivers(esw->dev); - mlx5_esw_qos_destroy(esw); + devlink_rate_nodes_destroy(devlink); + mlx5_esw_acls_ns_cleanup(esw); if (clear_vf) @@ -1568,10 +1572,16 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) lockdep_register_key(&esw->mode_lock_key); init_rwsem(&esw->mode_lock); lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key); + refcount_set(&esw->qos.refcnt, 0); esw->enabled_vports = 0; esw->mode = MLX5_ESWITCH_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; + else + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; dev->priv.eswitch = esw; BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); @@ -1601,6 +1611,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); + WARN_ON(refcount_read(&esw->qos.refcnt)); lockdep_unregister_key(&esw->mode_lock_key); mutex_destroy(&esw->state_lock); WARN_ON(!xa_empty(&esw->offloads.vhca_map)); @@ -1690,82 +1701,6 @@ bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF); } -static bool -is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) -{ - return vport_num == MLX5_VPORT_PF || - mlx5_eswitch_is_vf_vport(esw, vport_num) || - mlx5_esw_is_sf_vport(esw, vport_num); -} - -int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, - u8 *hw_addr, int *hw_addr_len, - struct netlink_ext_ack *extack) -{ - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - int err = -EOPNOTSUPP; - u16 vport_num; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) - return PTR_ERR(esw); - - vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); - if (!is_port_function_supported(esw, vport_num)) - return -EOPNOTSUPP; - - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) { - NL_SET_ERR_MSG_MOD(extack, "Invalid port"); - return PTR_ERR(vport); - } - - mutex_lock(&esw->state_lock); - if (vport->enabled) { - ether_addr_copy(hw_addr, vport->info.mac); - 
*hw_addr_len = ETH_ALEN; - err = 0; - } - mutex_unlock(&esw->state_lock); - return err; -} - -int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, - const u8 *hw_addr, int hw_addr_len, - struct netlink_ext_ack *extack) -{ - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - int err = -EOPNOTSUPP; - u16 vport_num; - - esw = mlx5_devlink_eswitch_get(port->devlink); - if (IS_ERR(esw)) { - NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); - return PTR_ERR(esw); - } - - vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); - if (!is_port_function_supported(esw, vport_num)) { - NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr"); - return -EINVAL; - } - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) { - NL_SET_ERR_MSG_MOD(extack, "Invalid port"); - return PTR_ERR(vport); - } - - mutex_lock(&esw->state_lock); - if (vport->enabled) - err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr); - else - NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); - mutex_unlock(&esw->state_lock); - return err; -} - int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { @@ -1822,8 +1757,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->qos = evport->info.qos; ivi->spoofchk = evport->info.spoofchk; ivi->trusted = evport->info.trusted; - ivi->min_tx_rate = evport->qos.min_rate; - ivi->max_tx_rate = evport->qos.max_rate; + if (evport->qos.enabled) { + ivi->min_tx_rate = evport->qos.min_rate; + ivi->max_tx_rate = evport->qos.max_rate; + } mutex_unlock(&esw->state_lock); return 0; @@ -1934,7 +1871,7 @@ free_out: return err; } -u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) +u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) { struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1948,7 +1885,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) struct mlx5_eswitch *esw; esw = dev->priv.eswitch; - return mlx5_esw_allowed(esw) ? esw->offloads.encap : + return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap : DEVLINK_ESWITCH_ENCAP_MODE_NONE; } EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 42f8ee2e5d9f..ead5e8acc8be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -308,10 +308,14 @@ struct mlx5_eswitch { atomic64_t user_count; struct { - bool enabled; u32 root_tsar_ix; struct mlx5_esw_rate_group *group0; struct list_head groups; /* Protected by esw->state_lock */ + + /* Protected by esw->state_lock. + * Initially 0, meaning no QoS users and QoS is disabled. 
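+	 * Incremented by esw_qos_get() for each QoS user (vport QoS
+	 * enable, rate group creation) and decremented by esw_qos_put();
+	 * the root TSAR is destroyed when the count drops back to zero.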
+ */ + refcount_t refcnt; } qos; struct mlx5_esw_bridge_offloads *br_offloads; @@ -339,9 +343,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps); -bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw); -int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable); - /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); @@ -516,11 +517,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u16 vport, u16 vlan, u8 qos, u8 set_flags); -static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw) -{ - return esw->qos.enabled; -} - static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, u8 vlan_depth) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f4eaa5893886..9a7b25692505 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -295,26 +295,28 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest, int *i) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; - int j, err; + int err; if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE)) return -EOPNOTSUPP; - for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) { - err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i); - if (err) - goto err_setup_chain; + /* flow steering cannot handle more than one dest with the same ft + * in a single flow + */ + if (esw_attr->out_count - esw_attr->split_count > 1) + return -EOPNOTSUPP; - if (esw_attr->dests[j].pkt_reformat) { - flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; - flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat; - } + err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i); + if (err) + return err; + + if (esw_attr->dests[esw_attr->split_count].pkt_reformat) { + flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat; } - return 0; + (*i)++; -err_setup_chain: - esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j); - return err; + return 0; } static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw, @@ -329,14 +331,25 @@ static bool esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + bool result = false; int i; - for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + /* Indirect table is supported only for flows with in_port uplink + * and the destination is vport on the same eswitch as the uplink, + * return false in case at least one of destinations doesn't meet + * this criteria. 
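+	 * An empty destination range (split_count == out_count) leaves
+	 * the result false as well.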
+ */ + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { if (esw_attr->dests[i].rep && mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, - esw_attr->dests[i].mdev)) - return true; - return false; + esw_attr->dests[i].mdev)) { + result = true; + } else { + result = false; + break; + } + } + return result; } static int @@ -2512,6 +2525,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master, struct mlx5_eswitch *esw = master->priv.eswitch; struct mlx5_flow_table_attr ft_attr = { .max_fte = 1, .prio = 0, .level = 0, + .flags = MLX5_FLOW_TABLE_OTHER_VPORT, }; struct mlx5_flow_namespace *egress_ns; struct mlx5_flow_table *acl; @@ -3183,12 +3197,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) u64 mapping_id; int err; - if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && - MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; - else - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; - mutex_init(&esw->offloads.termtbl_mutex); mlx5_rdma_enable_roce(esw->dev); @@ -3286,7 +3294,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) esw_offloads_metadata_uninit(esw); mlx5_rdma_disable_roce(esw->dev); mutex_destroy(&esw->offloads.termtbl_mutex); - esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; } static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) @@ -3630,7 +3637,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, *encap = esw->offloads.encap; unlock: up_write(&esw->mode_lock); - return 0; + return err; } static bool @@ -3862,3 +3869,62 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, return vport->metadata; } EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set); + +static bool +is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) +{ + return vport_num == MLX5_VPORT_PF || + mlx5_eswitch_is_vf_vport(esw, vport_num) || + mlx5_esw_is_sf_vport(esw, vport_num); +} + +int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + struct mlx5_vport *vport; + u16 vport_num; + + esw = mlx5_devlink_eswitch_get(port->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); + if (!is_port_function_supported(esw, vport_num)) + return -EOPNOTSUPP; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid port"); + return PTR_ERR(vport); + } + + mutex_lock(&esw->state_lock); + ether_addr_copy(hw_addr, vport->info.mac); + *hw_addr_len = ETH_ALEN; + mutex_unlock(&esw->state_lock); + return 0; +} + +int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + u16 vport_num; + + esw = mlx5_devlink_eswitch_get(port->devlink); + if (IS_ERR(esw)) { + NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); + return PTR_ERR(esw); + } + + vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); + if (!is_port_function_supported(esw, vport_num)) { + NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr"); + return -EINVAL; + } + + return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 750b21124a1a..dafe341358c7 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -451,7 +451,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, list_for_each_entry(dst, &fte->node.children, node.list) { if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; - if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT || + dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) num_encap++; num_fwd_destinations++; @@ -788,7 +789,8 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, int err; u32 *in; - if (namespace == MLX5_FLOW_NAMESPACE_FDB) + if (namespace == MLX5_FLOW_NAMESPACE_FDB || + namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS) max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size); else max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size); @@ -860,6 +862,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, switch (namespace) { case MLX5_FLOW_NAMESPACE_FDB: + case MLX5_FLOW_NAMESPACE_FDB_BYPASS: max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions); table_type = FS_FT_FDB; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 386ab9a2d490..b628917e38e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1525,7 +1525,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, struct mlx5_flow_destination *d2) { if (d1->type == d2->type) { - if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT || + d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && d1->vport.num == d2->vport.num && d1->vport.flags == d2->vport.flags && ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ? 
@@ -2206,6 +2207,22 @@ struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, } EXPORT_SYMBOL(mlx5_get_fdb_sub_ns); +static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type) +{ + switch (type) { + case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_LAG: + case MLX5_FLOW_NAMESPACE_OFFLOADS: + case MLX5_FLOW_NAMESPACE_ETHTOOL: + case MLX5_FLOW_NAMESPACE_KERNEL: + case MLX5_FLOW_NAMESPACE_LEFTOVERS: + case MLX5_FLOW_NAMESPACE_ANCHOR: + return true; + default: + return false; + } +} + struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type) { @@ -2235,31 +2252,39 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, if (steering->sniffer_tx_root_ns) return &steering->sniffer_tx_root_ns->ns; return NULL; - default: + case MLX5_FLOW_NAMESPACE_FDB_BYPASS: + root_ns = steering->fdb_root_ns; + prio = FDB_BYPASS_PATH; break; - } - - if (type == MLX5_FLOW_NAMESPACE_EGRESS || - type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) { + case MLX5_FLOW_NAMESPACE_EGRESS: + case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL: root_ns = steering->egress_root_ns; prio = type - MLX5_FLOW_NAMESPACE_EGRESS; - } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) { + break; + case MLX5_FLOW_NAMESPACE_RDMA_RX: root_ns = steering->rdma_rx_root_ns; prio = RDMA_RX_BYPASS_PRIO; - } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) { + break; + case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL: root_ns = steering->rdma_rx_root_ns; prio = RDMA_RX_KERNEL_PRIO; - } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) { + break; + case MLX5_FLOW_NAMESPACE_RDMA_TX: root_ns = steering->rdma_tx_root_ns; - } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) { + break; + case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS: root_ns = steering->rdma_rx_root_ns; prio = RDMA_RX_COUNTERS_PRIO; - } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) { + break; + case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS: root_ns = steering->rdma_tx_root_ns; prio = RDMA_TX_COUNTERS_PRIO; - } else { /* Must be NIC RX */ + break; + default: /* Must be NIC RX */ + WARN_ON(!is_nic_rx_ns(type)); root_ns = steering->root_ns; prio = type; + break; } if (!root_ns) @@ -2822,6 +2847,28 @@ static int create_fdb_fast_path(struct mlx5_flow_steering *steering) return 0; } +static int create_fdb_bypass(struct mlx5_flow_steering *steering) +{ + struct mlx5_flow_namespace *ns; + struct fs_prio *prio; + int i; + + prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0); + if (IS_ERR(prio)) + return PTR_ERR(prio); + + ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); + if (IS_ERR(ns)) + return PTR_ERR(ns); + + for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) { + prio = fs_create_prio(ns, i, 1); + if (IS_ERR(prio)) + return PTR_ERR(prio); + } + return 0; +} + static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *maj_prio; @@ -2831,12 +2878,10 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (!steering->fdb_root_ns) return -ENOMEM; - maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, - 1); - if (IS_ERR(maj_prio)) { - err = PTR_ERR(maj_prio); + err = create_fdb_bypass(steering); + if (err) goto out_err; - } + err = create_fdb_fast_path(steering); if (err) goto out_err; @@ -3038,6 +3083,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) steering->dev = dev; dev->priv.steering = steering; + if (mlx5_fs_dr_is_supported(dev)) + steering->mode = MLX5_FLOW_STEERING_MODE_SMFS; + else + steering->mode = 
MLX5_FLOW_STEERING_MODE_DMFS; + steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs", sizeof(struct mlx5_flow_group), 0, 0, NULL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 7711db245c63..5469b08d635f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -203,7 +203,7 @@ struct mlx5_ft_underlay_qp { u32 qpn; }; -#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00 +#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00 /* Calculate the fte_match_param length and without the reserved length. * Make sure the reserved field is the last. */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 31c99d53faf7..b406e0367af6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -38,9 +38,10 @@ #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) +#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000) /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) -#define MLX5_SF_NUM_COUNTERS_BULK 6 +#define MLX5_INIT_COUNTERS_BULK 8 #define MLX5_FC_POOL_MAX_THRESHOLD BIT(18) #define MLX5_FC_POOL_USED_BUFF_RATIO 10 @@ -145,13 +146,15 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev, spin_unlock(&fc_stats->counters_idr_lock); } -static int get_max_bulk_query_len(struct mlx5_core_dev *dev) +static int get_init_bulk_query_len(struct mlx5_core_dev *dev) { - int num_counters_bulk = mlx5_core_is_sf(dev) ? - MLX5_SF_NUM_COUNTERS_BULK : - MLX5_SW_MAX_COUNTERS_BULK; + return min_t(int, MLX5_INIT_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); +} - return min_t(int, num_counters_bulk, +static int get_max_bulk_query_len(struct mlx5_core_dev *dev) +{ + return min_t(int, MLX5_SW_MAX_COUNTERS_BULK, (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); } @@ -177,7 +180,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; bool query_more_counters = (first->id <= last_id); - int max_bulk_len = get_max_bulk_query_len(dev); + int cur_bulk_len = fc_stats->bulk_query_len; u32 *data = fc_stats->bulk_query_out; struct mlx5_fc *counter = first; u32 bulk_base_id; @@ -189,7 +192,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev, bulk_base_id = counter->id & ~0x3; /* number of counters to query inc. 
the last counter */ - bulk_len = min_t(int, max_bulk_len, + bulk_len = min_t(int, cur_bulk_len, ALIGN(last_id - bulk_base_id + 1, 4)); err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len, @@ -230,6 +233,41 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter) mlx5_fc_free(dev, counter); } +static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + int max_bulk_len = get_max_bulk_query_len(dev); + unsigned long now = jiffies; + u32 *bulk_query_out_tmp; + int max_out_len; + + if (fc_stats->bulk_query_alloc_failed && + time_before(now, fc_stats->next_bulk_query_alloc)) + return; + + max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); + bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL); + if (!bulk_query_out_tmp) { + mlx5_core_warn_once(dev, + "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n", + max_bulk_len); + fc_stats->bulk_query_alloc_failed = true; + fc_stats->next_bulk_query_alloc = + now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD; + return; + } + + kfree(fc_stats->bulk_query_out); + fc_stats->bulk_query_out = bulk_query_out_tmp; + fc_stats->bulk_query_len = max_bulk_len; + if (fc_stats->bulk_query_alloc_failed) { + mlx5_core_info(dev, + "Flow counters bulk query buffer size increased, bulk_size(%d)\n", + max_bulk_len); + fc_stats->bulk_query_alloc_failed = false; + } +} + static void mlx5_fc_stats_work(struct work_struct *work) { struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, @@ -247,15 +285,22 @@ static void mlx5_fc_stats_work(struct work_struct *work) queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval); - llist_for_each_entry(counter, addlist, addlist) + llist_for_each_entry(counter, addlist, addlist) { mlx5_fc_stats_insert(dev, counter); + fc_stats->num_counters++; + } llist_for_each_entry_safe(counter, tmp, dellist, dellist) { mlx5_fc_stats_remove(dev, counter); mlx5_fc_release(dev, counter); + fc_stats->num_counters--; } + if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) && + fc_stats->num_counters > get_init_bulk_query_len(dev)) + mlx5_fc_stats_bulk_query_size_increase(dev); + if (time_before(now, fc_stats->next_query) || list_empty(&fc_stats->counters)) return; @@ -378,8 +423,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy); int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - int max_bulk_len; - int max_out_len; + int init_bulk_len; + int init_out_len; spin_lock_init(&fc_stats->counters_idr_lock); idr_init(&fc_stats->counters_idr); @@ -387,11 +432,12 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) init_llist_head(&fc_stats->addlist); init_llist_head(&fc_stats->dellist); - max_bulk_len = get_max_bulk_query_len(dev); - max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len); - fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL); + init_bulk_len = get_init_bulk_query_len(dev); + init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len); + fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL); if (!fc_stats->bulk_query_out) return -ENOMEM; + fc_stats->bulk_query_len = init_bulk_len; fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 64f1abc4dc36..737df402c927 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -420,6 +420,11 @@ static void print_health_info(struct mlx5_core_dev *dev) if (!ioread8(&h->synd)) return; + if (ioread32be(&h->fw_ver) == 0xFFFFFFFF) { + mlx5_log(dev, LOGLEVEL_ERR, "PCI slot is unavailable\n"); + return; + } + rfr_severity = ioread8(&h->rfr_severity); severity = mlx5_health_get_severity(rfr_severity); mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n", @@ -835,6 +840,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms); add_timer(&health->timer); + + if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc)) + queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); } void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) @@ -902,8 +910,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev) INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work); INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work); INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update); - if (mlx5_core_is_pf(dev)) - queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 962d41418ce7..f4f7eaf16446 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -67,7 +67,9 @@ static void mlx5i_get_ethtool_stats(struct net_device *dev, } static int mlx5i_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = mlx5i_epriv(dev); @@ -75,7 +77,9 @@ static int mlx5i_set_ringparam(struct net_device *dev, } static void mlx5i_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = mlx5i_epriv(dev); @@ -105,7 +109,7 @@ static int mlx5i_set_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = mlx5i_epriv(netdev); - return mlx5e_ethtool_set_coalesce(priv, coal); + return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack); } static int mlx5i_get_coalesce(struct net_device *netdev, @@ -115,7 +119,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = mlx5i_epriv(netdev); - return mlx5e_ethtool_get_coalesce(priv, coal); + return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); } static int mlx5i_get_ts_info(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index ea1efdecc88c..0a99a020a3b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -110,14 +110,14 @@ void mlx5i_cleanup(struct mlx5e_priv *priv) static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) { - struct mlx5e_sw_stats s = { 0 }; + struct rtnl_link_stats64 s = {}; int i, j; for (i = 0; i < priv->stats_nch; i++) { struct mlx5e_channel_stats *channel_stats; struct mlx5e_rq_stats *rq_stats; - channel_stats = &priv->channel_stats[i]; + channel_stats = priv->channel_stats[i]; rq_stats = &channel_stats->rq; s.rx_packets += rq_stats->packets; @@ -128,11 +128,17 @@ 
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) s.tx_packets += sq_stats->packets; s.tx_bytes += sq_stats->bytes; - s.tx_queue_dropped += sq_stats->dropped; + s.tx_dropped += sq_stats->dropped; } } - memcpy(&priv->stats.sw, &s, sizeof(s)); + memset(&priv->stats.sw, 0, sizeof(s)); + + priv->stats.sw.rx_packets = s.rx_packets; + priv->stats.sw.rx_bytes = s.rx_bytes; + priv->stats.sw.tx_packets = s.tx_packets; + priv->stats.sw.tx_bytes = s.tx_bytes; + priv->stats.sw.tx_queue_dropped = s.tx_dropped; } void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) @@ -443,7 +449,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = { .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5i_stats_grps, .stats_grps_num = mlx5i_stats_grps_num, - .rx_ptp_support = false, }; /* mlx5i netdev NDOs */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 5308f23702bc..0b86e78dbc0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -350,7 +350,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { .rx_handlers = &mlx5i_rx_handlers, .max_tc = MLX5I_MAX_NUM_TC, .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), - .rx_ptp_support = false, }; const struct mlx5e_profile *mlx5i_pkey_get_profile(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c new file mode 100644 index 000000000000..380a208ab137 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include "mlx5_core.h" +#include "mlx5_irq.h" +#include "pci_irq.h" + +static void cpu_put(struct mlx5_irq_pool *pool, int cpu) +{ + pool->irqs_per_cpu[cpu]--; +} + +static void cpu_get(struct mlx5_irq_pool *pool, int cpu) +{ + pool->irqs_per_cpu[cpu]++; +} + +/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it */ +static int cpu_get_least_loaded(struct mlx5_irq_pool *pool, + const struct cpumask *req_mask) +{ + int best_cpu = -1; + int cpu; + + for_each_cpu_and(cpu, req_mask, cpu_online_mask) { + /* CPU has zero IRQs on it. No need to search any more CPUs.
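Stepping back to the print_health_info() hunk above: it relies on a common PCI convention, namely that reads from a device which has dropped off the bus complete with all-ones, so a firmware-version register reading 0xFFFFFFFF means nothing else in the health buffer can be trusted. A minimal standalone C sketch of that check, with a plain pointer read standing in for ioread32be() on the mapped health segment (read32() and device_present() are hypothetical names):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for ioread32be() on the mapped health buffer. */
static uint32_t read32(const volatile uint32_t *reg)
{
	return *reg;
}

/* Reads of a surprise-removed PCI function return all-ones, so an
 * all-ones firmware version means "bail out before decoding anything
 * else", exactly what the new early return in print_health_info() does.
 */
static int device_present(const volatile uint32_t *fw_ver_reg)
{
	return read32(fw_ver_reg) != 0xFFFFFFFFu;
}

int main(void)
{
	volatile uint32_t fake_fw_ver = 0xFFFFFFFFu; /* simulate a removed device */

	if (!device_present(&fake_fw_ver))
		printf("PCI slot is unavailable\n");
	return 0;
}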
*/ + if (!pool->irqs_per_cpu[cpu]) { + best_cpu = cpu; + break; + } + if (best_cpu < 0) + best_cpu = cpu; + if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu]) + best_cpu = cpu; + } + if (best_cpu == -1) { + /* There are no online CPUs in req_mask */ + mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n", + cpumask_pr_args(req_mask)); + best_cpu = cpumask_first(cpu_online_mask); + } + pool->irqs_per_cpu[best_cpu]++; + return best_cpu; +} + +/* Creating an IRQ from irq_pool */ +static struct mlx5_irq * +irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask) +{ + cpumask_var_t auto_mask; + struct mlx5_irq *irq; + u32 irq_index; + int err; + + if (!zalloc_cpumask_var(&auto_mask, GFP_KERNEL)) + return ERR_PTR(-ENOMEM); + err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL); + if (err) { + free_cpumask_var(auto_mask); /* don't leak the mask on error */ + return ERR_PTR(err); + } + if (pool->irqs_per_cpu) { + if (cpumask_weight(req_mask) > 1) + /* if req_mask contains more than one CPU, set the least loaded CPU + * of req_mask + */ + cpumask_set_cpu(cpu_get_least_loaded(pool, req_mask), auto_mask); + else + cpu_get(pool, cpumask_first(req_mask)); + } + irq = mlx5_irq_alloc(pool, irq_index, cpumask_empty(auto_mask) ? req_mask : auto_mask); + free_cpumask_var(auto_mask); + return irq; +} + +/* Looking for the IRQ with the smallest refcount that fits req_mask. + * If pool is sf_comp_pool, then we are looking for an IRQ with any of the + * requested CPUs in req_mask. + * for example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask + * isn't a subset of req_mask, so we will skip it. irq1_mask is a subset of req_mask, + * so we don't skip it. + * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will + * fit. And since a mask is a subset of itself, we will pass the first if below. + */ +static struct mlx5_irq * +irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask) +{ + int start = pool->xa_num_irqs.min; + int end = pool->xa_num_irqs.max; + struct mlx5_irq *irq = NULL; + struct mlx5_irq *iter; + int irq_refcount = 0; + unsigned long index; + + lockdep_assert_held(&pool->lock); + xa_for_each_range(&pool->irqs, index, iter, start, end) { + struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter); + int iter_refcount = mlx5_irq_read_locked(iter); + + if (!cpumask_subset(iter_mask, req_mask)) + /* skip IRQs with a mask which is not a subset of req_mask */ + continue; + if (iter_refcount < pool->min_threshold) + /* If we found an IRQ with less than min_thres, return it */ + return iter; + if (!irq || iter_refcount < irq_refcount) { + /* In case we won't find an IRQ with less than min_thres, + * keep a pointer to the least used IRQ + */ + irq_refcount = iter_refcount; + irq = iter; + } + } + return irq; +} + +/** + * mlx5_irq_affinity_request - request an IRQ according to the given mask. + * @pool: IRQ pool to request from. + * @req_mask: cpumask requested for this IRQ. + * + * This function returns a pointer to IRQ, or ERR_PTR in case of error.
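cpu_get_least_loaded() above is a plain linear minimum scan with an early exit on a completely idle CPU. A self-contained sketch of the same policy, assuming a simple load[] array in place of pool->irqs_per_cpu and a bitmask in place of the kernel cpumask (NCPUS and least_loaded_cpu() are illustrative names, not driver API):

#include <stdint.h>
#include <stdio.h>

#define NCPUS 8

static int least_loaded_cpu(const uint16_t load[NCPUS], uint32_t req_mask)
{
	int best = -1;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!(req_mask & (1u << cpu)))
			continue;		/* CPU not in the requested set */
		if (!load[cpu])
			return cpu;		/* idle CPU: can't do better, stop */
		if (best < 0 || load[cpu] < load[best])
			best = cpu;		/* remember the minimum so far */
	}
	return best;				/* -1 if req_mask had no usable CPU */
}

int main(void)
{
	uint16_t load[NCPUS] = { 3, 1, 4, 1, 5, 9, 2, 6 };

	/* CPUs 0-3 requested; CPU 1 carries the fewest IRQs */
	printf("least loaded: CPU %d\n", least_loaded_cpu(load, 0x0f));
	return 0;
}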
+ */ +struct mlx5_irq * +mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask) +{ + struct mlx5_irq *least_loaded_irq, *new_irq; + + mutex_lock(&pool->lock); + least_loaded_irq = irq_pool_find_least_loaded(pool, req_mask); + if (least_loaded_irq && + mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold) + goto out; + /* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */ + new_irq = irq_pool_request_irq(pool, req_mask); + if (IS_ERR(new_irq)) { + if (!least_loaded_irq) { + /* We failed to create an IRQ and we didn't find an IRQ */ + mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n", + PTR_ERR(new_irq)); + mutex_unlock(&pool->lock); + return new_irq; + } + /* We failed to create a new IRQ for the requested affinity, + * sharing existing IRQ. + */ + goto out; + } + least_loaded_irq = new_irq; + goto unlock; +out: + mlx5_irq_get_locked(least_loaded_irq); + if (mlx5_irq_read_locked(least_loaded_irq) > pool->max_threshold) + mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n", + pci_irq_vector(pool->dev->pdev, + mlx5_irq_get_index(least_loaded_irq)), pool->name, + mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ); +unlock: + mutex_unlock(&pool->lock); + return least_loaded_irq; +} + +void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs, + int num_irqs) +{ + struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); + int i; + + for (i = 0; i < num_irqs; i++) { + int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i])); + + synchronize_irq(pci_irq_vector(pool->dev->pdev, + mlx5_irq_get_index(irqs[i]))); + if (mlx5_irq_put(irqs[i])) + if (pool->irqs_per_cpu) + cpu_put(pool, cpu); + } +} + +/** + * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device. + * @dev: mlx5 device that is requesting the IRQs. + * @nirqs: number of IRQs to request. + * @irqs: an output array of IRQs pointers. + * + * Each IRQ is bound to at most 1 CPU. + * This function requests IRQs according to the default assignment. + * The default assignment policy is: + * - in each iteration, request the least loaded IRQ which is not bound to any + * CPU of the previous IRQs requested. + * + * This function returns the number of IRQs requested (which might be smaller than + * @nirqs) if successful, or a negative error code in case of an error. + */ +int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs, + struct mlx5_irq **irqs) +{ + struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); + cpumask_var_t req_mask; + struct mlx5_irq *irq; + int i = 0; + + if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_copy(req_mask, cpu_online_mask); + for (i = 0; i < nirqs; i++) { + if (mlx5_irq_pool_is_sf_pool(pool)) + irq = mlx5_irq_affinity_request(pool, req_mask); + else + /* In case the SF pool doesn't exist, fall back to the PF IRQs. + * The PF IRQs are already allocated and bound to CPUs + * at this point. Hence, only an index is needed.
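The request-auto loop (its body continues just below) spreads the requested IRQs across CPUs by clearing each granted IRQ's first CPU from the candidate mask before the next iteration. A toy version of that shrinking-mask pattern, with a bitmask standing in for the cpumask and grant_cpu() as a hypothetical stand-in for mlx5_irq_affinity_request() choosing a CPU:

#include <stdint.h>
#include <stdio.h>

#define NCPUS 8

/* Toy grant: hand out the first CPU of the candidate set. */
static int grant_cpu(uint32_t candidates)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (candidates & (1u << cpu))
			return cpu;
	return -1;			/* candidate set exhausted */
}

int main(void)
{
	uint32_t candidates = 0xff;	/* start from all "online" CPUs */
	int i;

	/* As in mlx5_irq_affinity_irqs_request_auto(): clear each granted
	 * CPU so the next request is steered to a different one.
	 */
	for (i = 0; i < 4; i++) {
		int cpu = grant_cpu(candidates);

		if (cpu < 0)
			break;
		candidates &= ~(1u << cpu);
		printf("irq %d -> cpu %d\n", i, cpu);
	}
	return 0;
}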
+ */ + irq = mlx5_irq_request(dev, i, NULL); + if (IS_ERR(irq)) + break; + irqs[i] = irq; + cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), req_mask); + mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n", + pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)), + cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)), + mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ); + } + free_cpumask_var(req_mask); + if (!i) + return PTR_ERR(irq); + return i; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 48d2ea690d7a..4ddf6b330a44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -615,6 +615,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, bool is_bonded, is_in_lag, mode_supported; int bond_status = 0; int num_slaves = 0; + int changed = 0; int idx; if (!netif_is_lag_master(upper)) @@ -653,27 +654,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, */ is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3; - if (!mlx5_lag_is_ready(ldev) && is_in_lag) { - NL_SET_ERR_MSG_MOD(info->info.extack, - "Can't activate LAG offload, PF is configured with more than 64 VFs"); - return 0; - } - /* Lag mode must be activebackup or hash. */ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH; - if (is_in_lag && !mode_supported) - NL_SET_ERR_MSG_MOD(info->info.extack, - "Can't activate LAG offload, TX type isn't supported"); - is_bonded = is_in_lag && mode_supported; if (tracker->is_bonded != is_bonded) { tracker->is_bonded = is_bonded; - return 1; + changed = 1; } - return 0; + if (!is_in_lag) + return changed; + + if (!mlx5_lag_is_ready(ldev)) + NL_SET_ERR_MSG_MOD(info->info.extack, + "Can't activate LAG offload, PF is configured with more than 64 VFs"); + else if (!mode_supported) + NL_SET_ERR_MSG_MOD(info->info.extack, + "Can't activate LAG offload, TX type isn't supported"); + + return changed; } static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev, @@ -716,9 +717,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, ldev = container_of(this, struct mlx5_lag, nb); - if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE) - return NOTIFY_DONE; - tracker = ldev->tracker; switch (event) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index bf4d3cbefa63..1ca01a5b6cdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -268,10 +268,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, fen_info = container_of(info, struct fib_entry_notifier_info, info); fi = fen_info->fi; - if (fi->nh) { - NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); - return notifier_from_errno(-EINVAL); - } + if (fi->nh) + return NOTIFY_DONE; fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev && fib_dev != ldev->pf[MLX5_LAG_P2].netdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index ad63dd45c8fb..a6592f9c3c05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) if 
(port_sel->tunnel) mlx5_destroy_ttc_table(port_sel->inner.ttc); mlx5_lag_destroy_definers(ldev); + memset(port_sel, 0, sizeof(*port_sel)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index 97e5845b4cfd..d5e47630e284 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains) u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) { + if (!mlx5_chains_prios_supported(chains)) + return 1; + if (mlx5_chains_ignore_flow_level_supported(chains)) return UINT_MAX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c index 0dd96a6b140d..c1df0d3595d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c @@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type dev->timeouts->to[type] = val; } -static void tout_set_def_val(struct mlx5_core_dev *dev) +void mlx5_tout_set_def_val(struct mlx5_core_dev *dev) { int i; - for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++) + for (i = 0; i < MAX_TIMEOUT_TYPES; i++) tout_set(dev, tout_def_sw_val[i], i); } @@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev) if (!dev->timeouts) return -ENOMEM; - tout_set_def_val(dev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h index 31faa5c17aa9..1c42ead782fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h @@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev); void mlx5_tout_cleanup(struct mlx5_core_dev *dev); void mlx5_tout_query_iseg(struct mlx5_core_dev *dev); int mlx5_tout_query_dtor(struct mlx5_core_dev *dev); +void mlx5_tout_set_def_val(struct mlx5_core_dev *dev); u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type); #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a92a92a52346..2c774f367199 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -98,6 +98,8 @@ enum { MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1, }; +#define LOG_MAX_SUPPORTED_QPS 0xff + static struct mlx5_profile profile[] = { [0] = { .mask = 0, @@ -109,7 +111,7 @@ static struct mlx5_profile profile[] = { [2] = { .mask = MLX5_PROF_MASK_QP_SIZE | MLX5_PROF_MASK_MR_CACHE, - .log_max_qp = 18, + .log_max_qp = LOG_MAX_SUPPORTED_QPS, .mr_cache[0] = { .size = 500, .limit = 250 @@ -484,10 +486,26 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP); } +static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(dev); + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + &val); + if (!err) + return val.vu32; + mlx5_core_dbg(dev, "Failed to get param. 
err = %d\n", err); + return err; +} + static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) { struct mlx5_profile *prof = &dev->profile; void *set_hca_cap; + int max_uc_list; int err; err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); @@ -507,7 +525,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) to_fw_pkey_sz(dev, 128)); /* Check log_max_qp from HCA caps to set in current profile */ - if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { + if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { + prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp); + } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", prof->log_max_qp, MLX5_CAP_GEN_MAX(dev, log_max_qp)); @@ -561,6 +581,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) if (MLX5_CAP_GEN(dev, roce_rw_supported)) MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev)); + max_uc_list = max_uc_list_get_devlink_param(dev); + if (max_uc_list > 0) + MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list, + ilog2(max_uc_list)); + return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); } @@ -992,11 +1017,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) if (mlx5_core_is_pf(dev)) pcie_print_link_status(dev->pdev); - err = mlx5_tout_init(dev); - if (err) { - mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); - return err; - } + mlx5_tout_set_def_val(dev); /* wait for firmware to accept initialization segments configurations */ @@ -1005,13 +1026,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) if (err) { mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n", mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); - goto err_tout_cleanup; + return err; } err = mlx5_cmd_init(dev); if (err) { mlx5_core_err(dev, "Failed initializing command interface, aborting\n"); - goto err_tout_cleanup; + return err; } mlx5_tout_query_iseg(dev); @@ -1075,18 +1096,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) mlx5_set_driver_version(dev); - mlx5_start_health_poll(dev); - err = mlx5_query_hca_caps(dev); if (err) { mlx5_core_err(dev, "query hca failed\n"); - goto stop_health; + goto reclaim_boot_pages; } + mlx5_start_health_poll(dev); + return 0; -stop_health: - mlx5_stop_health_poll(dev, boot); reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); err_disable_hca: @@ -1094,8 +1113,6 @@ err_disable_hca: err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); -err_tout_cleanup: - mlx5_tout_cleanup(dev); return err; } @@ -1114,7 +1131,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) mlx5_core_disable_hca(dev, 0); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_cleanup(dev); - mlx5_tout_cleanup(dev); return 0; } @@ -1476,6 +1492,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); + err = mlx5_tout_init(dev); + if (err) { + mlx5_core_err(dev, "Failed initializing timeouts, aborting\n"); + goto err_timeout_init; + } + err = mlx5_health_init(dev); if (err) goto err_health_init; @@ -1501,6 +1523,8 @@ err_adev_init: err_pagealloc_init: mlx5_health_cleanup(dev); err_health_init: + mlx5_tout_cleanup(dev); +err_timeout_init: debugfs_remove(dev->priv.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); @@ 
-1518,6 +1542,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); + mlx5_tout_cleanup(dev); debugfs_remove_recursive(dev->priv.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); @@ -1604,12 +1629,28 @@ static void remove_one(struct pci_dev *pdev) mlx5_devlink_free(devlink); } +#define mlx5_pci_trace(dev, fmt, ...) ({ \ + struct mlx5_core_dev *__dev = (dev); \ + mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \ + __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \ + __dev->pci_status, ##__VA_ARGS__); \ +}) + +static const char *result2str(enum pci_ers_result result) +{ + return result == PCI_ERS_RESULT_NEED_RESET ? "need reset" : + result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" : + result == PCI_ERS_RESULT_RECOVERED ? "recovered" : + "unknown"; +} + static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + enum pci_ers_result res; - mlx5_core_info(dev, "%s was called\n", __func__); + mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state); mlx5_enter_error_state(dev, false); mlx5_error_sw_reset(dev); @@ -1617,8 +1658,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); - return state == pci_channel_io_perm_failure ? + res = state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; + + mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res)); + return res; } /* wait for the device to show vital signs by waiting @@ -1652,28 +1696,34 @@ static int wait_vital(struct pci_dev *pdev) static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) { + enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT; struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err; - mlx5_core_info(dev, "%s was called\n", __func__); + mlx5_pci_trace(dev, "Enter\n"); err = mlx5_pci_enable_device(dev); if (err) { mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n", __func__, err); - return PCI_ERS_RESULT_DISCONNECT; + goto out; } pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); - if (wait_vital(pdev)) { - mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__); - return PCI_ERS_RESULT_DISCONNECT; + err = wait_vital(pdev); + if (err) { + mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n", + __func__, err); + goto out; } - return PCI_ERS_RESULT_RECOVERED; + res = PCI_ERS_RESULT_RECOVERED; +out: + mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res)); + return res; } static void mlx5_pci_resume(struct pci_dev *pdev) @@ -1681,14 +1731,12 @@ static void mlx5_pci_resume(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err; - mlx5_core_info(dev, "%s was called\n", __func__); + mlx5_pci_trace(dev, "Enter, loading driver..\n"); err = mlx5_load_one(dev); - if (err) - mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n", - __func__, err); - else - mlx5_core_info(dev, "%s: device recovered\n", __func__); + + mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err, + !err ? 
"recovered" : "Failed"); } static const struct pci_error_handlers mlx5_err_handler = { @@ -1809,12 +1857,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev) int mlx5_recover_device(struct mlx5_core_dev *dev) { - int ret = -EIO; + if (!mlx5_core_is_sf(dev)) { + mlx5_pci_disable_device(dev); + if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED) + return -EIO; + } - mlx5_pci_disable_device(dev); - if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) - ret = mlx5_load_one(dev); - return ret; + return mlx5_load_one(dev); } static struct pci_driver mlx5_core_driver = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index bb677329ea08..6f8baa0f2a73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -305,5 +305,6 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) bool mlx5_eth_supported(struct mlx5_core_dev *dev); bool mlx5_rdma_supported(struct mlx5_core_dev *dev); bool mlx5_vnet_supported(struct mlx5_core_dev *dev); +bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index 8116815663a7..23cb63fa4588 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -22,12 +22,40 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn, int msix_vec_count); int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs); +struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev); +void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq); struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, struct cpumask *affinity); -void mlx5_irq_release(struct mlx5_irq *irq); +int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, + struct mlx5_irq **irqs); +void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs); int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb); int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb); struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq); int mlx5_irq_get_index(struct mlx5_irq *irq); +struct mlx5_irq_pool; +#ifdef CONFIG_MLX5_SF +int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs, + struct mlx5_irq **irqs); +struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, + const struct cpumask *req_mask); +void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs, + int num_irqs); +#else +static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs, + struct mlx5_irq **irqs) +{ + return -EOPNOTSUPP; +} + +static inline struct mlx5_irq * +mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, + struct mlx5_irq **irqs, int num_irqs) {} +#endif #endif /* __MLX5_IRQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 830444f927d4..90fec0649ef5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -7,15 +7,12 @@ #include <linux/mlx5/driver.h> #include 
"mlx5_core.h" #include "mlx5_irq.h" +#include "pci_irq.h" #include "lib/sf.h" #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif -#define MLX5_MAX_IRQ_NAME (32) -/* max irq_index is 2047, so four chars */ -#define MLX5_MAX_IRQ_IDX_CHARS (4) - #define MLX5_SFS_PER_CTRL_IRQ 64 #define MLX5_IRQ_CTRL_SF_MAX 8 /* min num of vectors for SFs to be enabled */ @@ -25,7 +22,6 @@ #define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX) #define MLX5_EQ_SHARE_IRQ_MIN_COMP (1) #define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4) -#define MLX5_EQ_REFS_PER_IRQ (2) struct mlx5_irq { struct atomic_notifier_head nh; @@ -37,16 +33,6 @@ struct mlx5_irq { int irqn; }; -struct mlx5_irq_pool { - char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS]; - struct xa_limit xa_num_irqs; - struct mutex lock; /* sync IRQs creations */ - struct xarray irqs; - u32 max_threshold; - u32 min_threshold; - struct mlx5_core_dev *dev; -}; - struct mlx5_irq_table { struct mlx5_irq_pool *pf_pool; struct mlx5_irq_pool *sf_ctrl_pool; @@ -153,18 +139,28 @@ static void irq_release(struct mlx5_irq *irq) kfree(irq); } -static void irq_put(struct mlx5_irq *irq) +int mlx5_irq_put(struct mlx5_irq *irq) { struct mlx5_irq_pool *pool = irq->pool; + int ret = 0; mutex_lock(&pool->lock); irq->refcount--; - if (!irq->refcount) + if (!irq->refcount) { irq_release(irq); + ret = 1; + } mutex_unlock(&pool->lock); + return ret; +} + +int mlx5_irq_read_locked(struct mlx5_irq *irq) +{ + lockdep_assert_held(&irq->pool->lock); + return irq->refcount; } -static int irq_get_locked(struct mlx5_irq *irq) +int mlx5_irq_get_locked(struct mlx5_irq *irq) { lockdep_assert_held(&irq->pool->lock); if (WARN_ON_ONCE(!irq->refcount)) @@ -178,7 +174,7 @@ static int irq_get(struct mlx5_irq *irq) int err; mutex_lock(&irq->pool->lock); - err = irq_get_locked(irq); + err = mlx5_irq_get_locked(irq); mutex_unlock(&irq->pool->lock); return err; } @@ -210,12 +206,8 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx) snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx); } -static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool) -{ - return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf")); -} - -static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) +struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, + const struct cpumask *affinity) { struct mlx5_core_dev *dev = pool->dev; char name[MLX5_MAX_IRQ_NAME]; @@ -226,7 +218,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) if (!irq) return ERR_PTR(-ENOMEM); irq->irqn = pci_irq_vector(dev->pdev, i); - if (!irq_pool_is_sf_pool(pool)) + if (!mlx5_irq_pool_is_sf_pool(pool)) irq_set_name(pool, name, i); else irq_sf_set_name(pool, name, i); @@ -244,6 +236,10 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) err = -ENOMEM; goto err_cpumask; } + if (affinity) { + cpumask_copy(irq->mask, affinity); + irq_set_affinity_hint(irq->irqn, irq->mask); + } irq->pool = pool; irq->refcount = 1; irq->index = i; @@ -255,6 +251,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) } return irq; err_xa: + irq_set_affinity_hint(irq->irqn, NULL); free_cpumask_var(irq->mask); err_cpumask: free_irq(irq->irqn, &irq->nh); @@ -275,7 +272,7 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb) return -ENOENT; ret = atomic_notifier_chain_register(&irq->nh, nb); if (ret) - irq_put(irq); + mlx5_irq_put(irq); return ret; } @@ -284,7 +281,7 @@ int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb) int err = 0; 
err = atomic_notifier_chain_unregister(&irq->nh, nb); - irq_put(irq); + mlx5_irq_put(irq); return err; } @@ -300,131 +297,121 @@ int mlx5_irq_get_index(struct mlx5_irq *irq) /* irq_pool API */ -/* creating an irq from irq_pool */ -static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool, - struct cpumask *affinity) +/* requesting an irq from a given pool according to given index */ +static struct mlx5_irq * +irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx, + struct cpumask *affinity) { struct mlx5_irq *irq; - u32 irq_index; - int err; - err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, - GFP_KERNEL); - if (err) - return ERR_PTR(err); - irq = irq_request(pool, irq_index); - if (IS_ERR(irq)) - return irq; - cpumask_copy(irq->mask, affinity); - irq_set_affinity_hint(irq->irqn, irq->mask); + mutex_lock(&pool->lock); + irq = xa_load(&pool->irqs, vecidx); + if (irq) { + mlx5_irq_get_locked(irq); + goto unlock; + } + irq = mlx5_irq_alloc(pool, vecidx, affinity); +unlock: + mutex_unlock(&pool->lock); return irq; } -/* looking for the irq with the smallest refcount and the same affinity */ -static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, - struct cpumask *affinity) +static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table) { - int start = pool->xa_num_irqs.min; - int end = pool->xa_num_irqs.max; - struct mlx5_irq *irq = NULL; - struct mlx5_irq *iter; - unsigned long index; + return irq_table->sf_ctrl_pool; +} - lockdep_assert_held(&pool->lock); - xa_for_each_range(&pool->irqs, index, iter, start, end) { - if (!cpumask_equal(iter->mask, affinity)) - continue; - if (iter->refcount < pool->min_threshold) - return iter; - if (!irq || iter->refcount < irq->refcount) - irq = iter; - } - return irq; +static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table) +{ + return irq_table->sf_comp_pool; } -/* requesting an irq from a given pool according to given affinity */ -static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool, - struct cpumask *affinity) +struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev) { - struct mlx5_irq *least_loaded_irq, *new_irq; + struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev); + struct mlx5_irq_pool *pool = NULL; - mutex_lock(&pool->lock); - least_loaded_irq = irq_pool_find_least_loaded(pool, affinity); - if (least_loaded_irq && - least_loaded_irq->refcount < pool->min_threshold) - goto out; - new_irq = irq_pool_create_irq(pool, affinity); - if (IS_ERR(new_irq)) { - if (!least_loaded_irq) { - mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n", - cpumask_first(affinity)); - mutex_unlock(&pool->lock); - return new_irq; - } - /* We failed to create a new IRQ for the requested affinity, - * sharing existing IRQ. - */ - goto out; - } - least_loaded_irq = new_irq; - goto unlock; -out: - irq_get_locked(least_loaded_irq); - if (least_loaded_irq->refcount > pool->max_threshold) - mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n", - least_loaded_irq->irqn, pool->name, - least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ); -unlock: - mutex_unlock(&pool->lock); - return least_loaded_irq; + if (mlx5_core_is_sf(dev)) + pool = sf_irq_pool_get(irq_table); + + /* In some configs, there won't be a pool of SFs IRQs. Hence, returning + * the PF IRQs pool in case the SF pool doesn't exist. + */ + return pool ? 
pool : irq_table->pf_pool; } -/* requesting an irq from a given pool according to given index */ -static struct mlx5_irq * -irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx, - struct cpumask *affinity) +static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev) { - struct mlx5_irq *irq; + struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev); + struct mlx5_irq_pool *pool = NULL; - mutex_lock(&pool->lock); - irq = xa_load(&pool->irqs, vecidx); - if (irq) { - irq_get_locked(irq); - goto unlock; + if (mlx5_core_is_sf(dev)) + pool = sf_ctrl_irq_pool_get(irq_table); + + /* In some configs, there won't be a pool of SFs IRQs. Hence, returning + * the PF IRQs pool in case the SF pool doesn't exist. + */ + return pool ? pool : irq_table->pf_pool; +} + +/** + * mlx5_irqs_release - release one or more IRQs back to the system. + * @irqs: IRQs to be released. + * @nirqs: number of IRQs to be released. + */ +static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs) +{ + int i; + + for (i = 0; i < nirqs; i++) { + synchronize_irq(irqs[i]->irqn); + mlx5_irq_put(irqs[i]); } - irq = irq_request(pool, vecidx); - if (IS_ERR(irq) || !affinity) - goto unlock; - cpumask_copy(irq->mask, affinity); - if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max && - cpumask_empty(irq->mask)) - cpumask_set_cpu(0, irq->mask); - irq_set_affinity_hint(irq->irqn, irq->mask); -unlock: - mutex_unlock(&pool->lock); - return irq; } -static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table, - int i, struct cpumask *affinity) +/** + * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system. + * @ctrl_irq: ctrl IRQ to be released. + */ +void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq) { - if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL) - return irq_table->sf_ctrl_pool; - return irq_table->sf_comp_pool; + mlx5_irqs_release(&ctrl_irq, 1); } /** - * mlx5_irq_release - release an IRQ back to the system. - * @irq: irq to be released. + * mlx5_ctrl_irq_request - request a ctrl IRQ for mlx5 device. + * @dev: mlx5 device that is requesting the IRQ. + * + * This function returns a pointer to IRQ, or ERR_PTR in case of error. */ -void mlx5_irq_release(struct mlx5_irq *irq) +struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev) { - synchronize_irq(irq->irqn); - irq_put(irq); + struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev); + cpumask_var_t req_mask; + struct mlx5_irq *irq; + + if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) + return ERR_PTR(-ENOMEM); + cpumask_copy(req_mask, cpu_online_mask); + if (!mlx5_irq_pool_is_sf_pool(pool)) { + /* In case we are allocating a control IRQ for PF/VF */ + if (!pool->xa_num_irqs.max) { + cpumask_clear(req_mask); + /* In case we only have a single IRQ for PF/VF */ + cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask); + } + /* Allocate the IRQ in the last index of the pool */ + irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask); + } else { + irq = mlx5_irq_affinity_request(pool, req_mask); + } + + free_cpumask_var(req_mask); + return irq; } /** - * mlx5_irq_request - request an IRQ for mlx5 device. + * mlx5_irq_request - request an IRQ for mlx5 PF/VF device. * @dev: mlx5 device that is requesting the IRQ. * @vecidx: vector index of the IRQ. This argument is ignored if affinity is * provided.
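mlx5_irqs_release() above quiesces each vector with synchronize_irq() and then drops a reference; mlx5_irq_put() reports whether that was the final reference, which is what lets mlx5_irq_affinity_irqs_release() decrement the per-CPU counters only when an IRQ actually dies. A minimal sketch of that put-reports-death idiom (locking omitted, unlike the pool mutex in the driver; struct shared_irq and irq_put() are illustrative names):

#include <stdio.h>

struct shared_irq {
	int refcount;
};

/* Mirrors the shape of mlx5_irq_put(): drop one reference and report
 * whether it was the last, so the caller can fix up its accounting.
 */
static int irq_put(struct shared_irq *irq)
{
	if (--irq->refcount)
		return 0;			/* still shared, nothing freed */
	printf("final put: releasing\n");	/* irq_release() would run here */
	return 1;
}

int main(void)
{
	struct shared_irq irq = { .refcount = 2 };

	if (irq_put(&irq))		/* first put: object survives */
		printf("update per-CPU counters\n");
	if (irq_put(&irq))		/* second put: object is gone */
		printf("update per-CPU counters\n");
	return 0;
}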
@@ -439,23 +426,8 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, struct mlx5_irq_pool *pool; struct mlx5_irq *irq; - if (mlx5_core_is_sf(dev)) { - pool = find_sf_irq_pool(irq_table, vecidx, affinity); - if (!pool) - /* we don't have IRQs for SFs, using the PF IRQs */ - goto pf_irq; - if (cpumask_empty(affinity) && !strcmp(pool->name, "mlx5_sf_comp")) - /* In case an SF user request IRQ with vecidx */ - irq = irq_pool_request_vector(pool, vecidx, NULL); - else - irq = irq_pool_request_affinity(pool, affinity); - goto out; - } -pf_irq: pool = irq_table->pf_pool; - vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx; irq = irq_pool_request_vector(pool, vecidx, affinity); -out: if (IS_ERR(irq)) return irq; mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n", @@ -464,6 +436,51 @@ out: return irq; } +/** + * mlx5_irqs_release_vectors - release one or more IRQs back to the system. + * @irqs: IRQs to be released. + * @nirqs: number of IRQs to be released. + */ +void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs) +{ + mlx5_irqs_release(irqs, nirqs); +} + +/** + * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device. + * @dev: mlx5 device that is requesting the IRQs. + * @cpus: CPUs array for binding the IRQs + * @nirqs: number of IRQs to request. + * @irqs: an output array of IRQs pointers. + * + * Each IRQ is bound to at most 1 CPU. + * This function requests @nirqs IRQs, one bound to each CPU in @cpus. + * + * This function returns the number of IRQs requested (which might be smaller than + * @nirqs) if successful, or a negative error code in case of an error. + */ +int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, + struct mlx5_irq **irqs) +{ + cpumask_var_t req_mask; + struct mlx5_irq *irq; + int i; + + if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) + return -ENOMEM; + for (i = 0; i < nirqs; i++) { + cpumask_set_cpu(cpus[i], req_mask); + irq = mlx5_irq_request(dev, i, req_mask); + if (IS_ERR(irq)) + break; + cpumask_clear(req_mask); + irqs[i] = irq; + } + + free_cpumask_var(req_mask); + return i ?
i : PTR_ERR(irq); +} + static struct mlx5_irq_pool * irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name, u32 min_threshold, u32 max_threshold) @@ -479,7 +496,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name, pool->xa_num_irqs.max = start + size - 1; if (name) snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS, - name); + "%s", name); pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ; pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ; mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d", @@ -500,6 +517,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) irq_release(irq); xa_destroy(&pool->irqs); mutex_destroy(&pool->lock); + kfree(pool->irqs_per_cpu); kvfree(pool); } @@ -547,7 +565,17 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec) err = PTR_ERR(table->sf_comp_pool); goto err_sf_ctrl; } + + table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL); + if (!table->sf_comp_pool->irqs_per_cpu) { + err = -ENOMEM; + goto err_irqs_per_cpu; + } + return 0; + +err_irqs_per_cpu: + irq_pool_free(table->sf_comp_pool); err_sf_ctrl: irq_pool_free(table->sf_ctrl_pool); err_pf: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h new file mode 100644 index 000000000000..5c7e68bee43a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __PCI_IRQ_H__ +#define __PCI_IRQ_H__ + +#include <linux/mlx5/driver.h> + +#define MLX5_MAX_IRQ_NAME (32) +/* max irq_index is 2047, so four chars */ +#define MLX5_MAX_IRQ_IDX_CHARS (4) +#define MLX5_EQ_REFS_PER_IRQ (2) + +struct mlx5_irq; + +struct mlx5_irq_pool { + char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS]; + struct xa_limit xa_num_irqs; + struct mutex lock; /* sync IRQs creations */ + struct xarray irqs; + u32 max_threshold; + u32 min_threshold; + u16 *irqs_per_cpu; + struct mlx5_core_dev *dev; +}; + +struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev); +static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool) +{ + return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf")); +} + +struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, + const struct cpumask *affinity); +int mlx5_irq_get_locked(struct mlx5_irq *irq); +int mlx5_irq_read_locked(struct mlx5_irq *irq); +int mlx5_irq_put(struct mlx5_irq *irq); + +#endif /* __PCI_IRQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index f37db7cc32a6..7da012ff0d41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -30,10 +30,7 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev) { struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; - if (!mlx5_sf_dev_supported(dev)) - return false; - - return !xa_empty(&table->devices); + return table && !xa_empty(&table->devices); } static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index 252d6017387d..17aa348989cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -247,7 +247,7 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) { struct mlx5_sf_hw_table *table; u16 max_ext_fn = 0; - u16 ext_base_id; + u16 ext_base_id = 0; u16 max_fn = 0; u16 base_id; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 07936841ce99..c61a5e83c78c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -1560,6 +1560,12 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action) return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL; } +static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn) +{ + return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) && + !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify); +} + static int dr_actions_convert_modify_header(struct mlx5dr_action *action, u32 max_hw_actions, u32 num_sw_actions, @@ -1591,8 +1597,13 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, if (ret) return ret; - if (!(*modify_ttl)) - *modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action); + if (!(*modify_ttl) && + dr_action_modify_check_is_ttl_modify(sw_action)) { + if (dr_action_modify_ttl_ignore(dmn)) + continue; + + *modify_ttl = true; + } /* Convert SW action to HW action */ ret = dr_action_modify_sw_to_hw(dmn, @@ -1631,7 +1642,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, * modify actions doesn't exceeds the limit */ hw_idx++; - if ((num_sw_actions + hw_idx - i) >= max_hw_actions) { + if (hw_idx >= max_hw_actions) { mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n"); return -EINVAL; } @@ -1642,6 +1653,10 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, hw_idx++; } + /* if the resulting HW actions list is empty, add NOP action */ + if (!hw_idx) + hw_idx++; + *num_hw_actions = hw_idx; return 0; @@ -1792,7 +1807,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, int mlx5dr_action_destroy(struct mlx5dr_action *action) { - if (refcount_read(&action->refcount) > 1) + if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1)) return -EBUSY; switch (action->action_type) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 1d8febed0d76..4dd619d238cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -132,6 +132,13 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new); + /* geneve_tlv_option_0_exist is the indication of + * STE support for lookup type flex_parser_ok + */ + caps->flex_parser_ok_bits_supp = + MLX5_CAP_FLOWTABLE(mdev, + flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist); + if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) { caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); @@ -152,7 +159,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->flex_parser_id_mpls_over_gre = MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre); - if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED) + if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED) 
caps->flex_parser_id_mpls_over_udp = MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label); @@ -599,7 +606,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev, for (i = 0; i < fte->dests_size; i++) { if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; - if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT || + fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) num_encap++; num_fwd_destinations++; @@ -724,12 +732,19 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: id = fte->dest_arr[i].ft_id; break; + case MLX5_FLOW_DESTINATION_TYPE_UPLINK: case MLX5_FLOW_DESTINATION_TYPE_VPORT: - id = fte->dest_arr[i].vport.num; - MLX5_SET(dest_format_struct, in_dests, - destination_eswitch_owner_vhca_id_valid, - !!(fte->dest_arr[i].vport.flags & - MLX5_FLOW_DEST_VPORT_VHCA_ID)); + if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) { + id = fte->dest_arr[i].vport.num; + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id_valid, + !!(fte->dest_arr[i].vport.flags & + MLX5_FLOW_DEST_VPORT_VHCA_ID)); + } else { + id = 0; + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id_valid, 1); + } MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id, fte->dest_arr[i].vport.vhca_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c new file mode 100644 index 000000000000..2784cd59fefe --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c @@ -0,0 +1,649 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
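The new dr_dbg.c that follows dumps software-steering objects to debugfs as flat CSV records: a numeric record type, then IDs derived from kernel pointers (DR_DBG_PTR_TO_ID() keeps the pointer's low 32 bits) and from 64-byte-aligned ICM addresses (dr_dump_icm_to_idx() shifts out the low 6 bits). A small userspace sketch of those two derivations; the 3300 value mirrors DR_DUMP_REC_TYPE_RULE from the enum below, and ptr_to_id()/icm_to_idx() are illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Same shape as DR_DBG_PTR_TO_ID(): a per-object ID taken from the low
 * 32 bits of the pointer -- stable within one dump, meaningless across
 * dumps or reboots.
 */
static unsigned long long ptr_to_id(const void *p)
{
	return (unsigned long long)(uintptr_t)p & 0xFFFFFFFFULL;
}

/* Same shape as dr_dump_icm_to_idx(): ICM chunks are 64-byte aligned,
 * so shifting out the low 6 bits yields a compact index.
 */
static unsigned long long icm_to_idx(unsigned long long icm_addr)
{
	return (icm_addr >> 6) & 0xffffffff;
}

int main(void)
{
	int rule;	/* stand-in object: only its address is used */

	/* One CSV record per object: record-type code, then the IDs */
	printf("%d,0x%llx,0x%llx\n", 3300, ptr_to_id(&rule),
	       icm_to_idx(0x123440ULL));
	return 0;
}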
+ +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include "dr_types.h" + +#define DR_DBG_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL) + +enum dr_dump_rec_type { + DR_DUMP_REC_TYPE_DOMAIN = 3000, + DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER = 3001, + DR_DUMP_REC_TYPE_DOMAIN_INFO_DEV_ATTR = 3002, + DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT = 3003, + DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS = 3004, + DR_DUMP_REC_TYPE_DOMAIN_SEND_RING = 3005, + + DR_DUMP_REC_TYPE_TABLE = 3100, + DR_DUMP_REC_TYPE_TABLE_RX = 3101, + DR_DUMP_REC_TYPE_TABLE_TX = 3102, + + DR_DUMP_REC_TYPE_MATCHER = 3200, + DR_DUMP_REC_TYPE_MATCHER_MASK = 3201, + DR_DUMP_REC_TYPE_MATCHER_RX = 3202, + DR_DUMP_REC_TYPE_MATCHER_TX = 3203, + DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204, + + DR_DUMP_REC_TYPE_RULE = 3300, + DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301, + DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0 = 3302, + DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 = 3303, + DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1 = 3304, + + DR_DUMP_REC_TYPE_ACTION_ENCAP_L2 = 3400, + DR_DUMP_REC_TYPE_ACTION_ENCAP_L3 = 3401, + DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR = 3402, + DR_DUMP_REC_TYPE_ACTION_DROP = 3403, + DR_DUMP_REC_TYPE_ACTION_QP = 3404, + DR_DUMP_REC_TYPE_ACTION_FT = 3405, + DR_DUMP_REC_TYPE_ACTION_CTR = 3406, + DR_DUMP_REC_TYPE_ACTION_TAG = 3407, + DR_DUMP_REC_TYPE_ACTION_VPORT = 3408, + DR_DUMP_REC_TYPE_ACTION_DECAP_L2 = 3409, + DR_DUMP_REC_TYPE_ACTION_DECAP_L3 = 3410, + DR_DUMP_REC_TYPE_ACTION_DEVX_TIR = 3411, + DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN = 3412, + DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413, + DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415, + DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420, + DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421 +}; + +void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl) +{ + mutex_lock(&tbl->dmn->dump_info.dbg_mutex); + list_add_tail(&tbl->dbg_node, &tbl->dmn->dbg_tbl_list); + mutex_unlock(&tbl->dmn->dump_info.dbg_mutex); +} + +void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl) +{ + mutex_lock(&tbl->dmn->dump_info.dbg_mutex); + list_del(&tbl->dbg_node); + mutex_unlock(&tbl->dmn->dump_info.dbg_mutex); +} + +void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule) +{ + struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn; + + mutex_lock(&dmn->dump_info.dbg_mutex); + list_add_tail(&rule->dbg_node, &rule->matcher->dbg_rule_list); + mutex_unlock(&dmn->dump_info.dbg_mutex); +} + +void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule) +{ + struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn; + + mutex_lock(&dmn->dump_info.dbg_mutex); + list_del(&rule->dbg_node); + mutex_unlock(&dmn->dump_info.dbg_mutex); +} + +static u64 dr_dump_icm_to_idx(u64 icm_addr) +{ + return (icm_addr >> 6) & 0xffffffff; +} + +#define DR_HEX_SIZE 256 + +static void +dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size) +{ + if (WARN_ON_ONCE(DR_HEX_SIZE < 2 * size + 1)) + size = DR_HEX_SIZE / 2 - 1; /* truncate */ + + bin2hex(hex, src, size); + hex[2 * size] = 0; /* NULL-terminate */ +} + +static int +dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id, + struct mlx5dr_rule_action_member *action_mem) +{ + struct mlx5dr_action *action = action_mem->action; + const u64 action_id = DR_DBG_PTR_TO_ID(action); + + switch (action->action_type) { + case DR_ACTION_TYP_DROP: + seq_printf(file, "%d,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id); + break; + case DR_ACTION_TYP_FT: + if (action->dest_tbl->is_fw_tbl) + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_FT, action_id, + rule_id, 
action->dest_tbl->fw_tbl.id); + else + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_FT, action_id, + rule_id, action->dest_tbl->tbl->table_id); + + break; + case DR_ACTION_TYP_CTR: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id, + action->ctr->ctr_id + action->ctr->offset); + break; + case DR_ACTION_TYP_TAG: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id, + action->flow_tag->flow_tag); + break; + case DR_ACTION_TYP_MODIFY_HDR: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id, + rule_id, action->rewrite->index); + break; + case DR_ACTION_TYP_VPORT: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id, + action->vport->caps->num); + break; + case DR_ACTION_TYP_TNL_L2_TO_L2: + seq_printf(file, "%d,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id, + rule_id); + break; + case DR_ACTION_TYP_TNL_L3_TO_L2: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id, + rule_id, action->rewrite->index); + break; + case DR_ACTION_TYP_L2_TO_TNL_L2: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id, + rule_id, action->reformat->id); + break; + case DR_ACTION_TYP_L2_TO_TNL_L3: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id, + rule_id, action->reformat->id); + break; + case DR_ACTION_TYP_POP_VLAN: + seq_printf(file, "%d,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id, + rule_id); + break; + case DR_ACTION_TYP_PUSH_VLAN: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id, + rule_id, action->push_vlan->vlan_hdr); + break; + case DR_ACTION_TYP_INSERT_HDR: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id, + rule_id, action->reformat->id, + action->reformat->param_0, + action->reformat->param_1); + break; + case DR_ACTION_TYP_REMOVE_HDR: + seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id, + rule_id, action->reformat->id, + action->reformat->param_0, + action->reformat->param_1); + break; + case DR_ACTION_TYP_SAMPLER: + seq_printf(file, + "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id, + 0, 0, action->sampler->sampler_id, + action->sampler->rx_icm_addr, + action->sampler->tx_icm_addr); + break; + default: + return 0; + } + + return 0; +} + +static int +dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste, + bool is_rx, const u64 rule_id, u8 format_ver) +{ + char hw_ste_dump[DR_HEX_SIZE]; + u32 mem_rec_type; + + if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) { + mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 : + DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0; + } else { + mem_rec_type = is_rx ? 
DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 : + DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1; + } + + dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED); + + seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type, + dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id, + hw_ste_dump); + + return 0; +} + +static int +dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx, + bool is_rx, const u64 rule_id, u8 format_ver) +{ + struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES]; + struct mlx5dr_ste *curr_ste = rule_rx_tx->last_rule_ste; + int ret, i; + + if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i)) + return 0; + + while (i--) { + ret = dr_dump_rule_mem(file, ste_arr[i], is_rx, rule_id, + format_ver); + if (ret < 0) + return ret; + } + + return 0; +} + +static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule) +{ + struct mlx5dr_rule_action_member *action_mem; + const u64 rule_id = DR_DBG_PTR_TO_ID(rule); + struct mlx5dr_rule_rx_tx *rx = &rule->rx; + struct mlx5dr_rule_rx_tx *tx = &rule->tx; + u8 format_ver; + int ret; + + format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver; + + seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id, + DR_DBG_PTR_TO_ID(rule->matcher)); + + if (rx->nic_matcher) { + ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver); + if (ret < 0) + return ret; + } + + if (tx->nic_matcher) { + ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver); + if (ret < 0) + return ret; + } + + list_for_each_entry(action_mem, &rule->rule_actions_list, list) { + ret = dr_dump_rule_action_mem(file, rule_id, action_mem); + if (ret < 0) + return ret; + } + + return 0; +} + +static int +dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask, + u8 criteria, const u64 matcher_id) +{ + char dump[DR_HEX_SIZE]; + + seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK, + matcher_id); + + if (criteria & DR_MATCHER_CRITERIA_OUTER) { + dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer)); + seq_printf(file, "%s,", dump); + } else { + seq_puts(file, ","); + } + + if (criteria & DR_MATCHER_CRITERIA_INNER) { + dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner)); + seq_printf(file, "%s,", dump); + } else { + seq_puts(file, ","); + } + + if (criteria & DR_MATCHER_CRITERIA_MISC) { + dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc)); + seq_printf(file, "%s,", dump); + } else { + seq_puts(file, ","); + } + + if (criteria & DR_MATCHER_CRITERIA_MISC2) { + dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2)); + seq_printf(file, "%s,", dump); + } else { + seq_puts(file, ","); + } + + if (criteria & DR_MATCHER_CRITERIA_MISC3) { + dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3)); + seq_printf(file, "%s\n", dump); + } else { + seq_puts(file, ",\n"); + } + + return 0; +} + +static int +dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder, + u32 index, bool is_rx, const u64 matcher_id) +{ + seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n", + DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx, + builder->lu_type); + + return 0; +} + +static int +dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx, + struct mlx5dr_matcher_rx_tx *matcher_rx_tx, + const u64 matcher_id) +{ + enum dr_dump_rec_type rec_type; + int i, ret; + + rec_type = is_rx ? 
DR_DUMP_REC_TYPE_MATCHER_RX : + DR_DUMP_REC_TYPE_MATCHER_TX; + + seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n", + rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx), + matcher_id, matcher_rx_tx->num_of_builders, + dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr), + dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr)); + + for (i = 0; i < matcher_rx_tx->num_of_builders; i++) { + ret = dr_dump_matcher_builder(file, + &matcher_rx_tx->ste_builder[i], + i, is_rx, matcher_id); + if (ret < 0) + return ret; + } + + return 0; +} + +static int +dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher) +{ + struct mlx5dr_matcher_rx_tx *rx = &matcher->rx; + struct mlx5dr_matcher_rx_tx *tx = &matcher->tx; + u64 matcher_id; + int ret; + + matcher_id = DR_DBG_PTR_TO_ID(matcher); + + seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER, + matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio); + + ret = dr_dump_matcher_mask(file, &matcher->mask, + matcher->match_criteria, matcher_id); + if (ret < 0) + return ret; + + if (rx->nic_tbl) { + ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id); + if (ret < 0) + return ret; + } + + if (tx->nic_tbl) { + ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id); + if (ret < 0) + return ret; + } + + return 0; +} + +static int +dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher) +{ + struct mlx5dr_rule *rule; + int ret; + + ret = dr_dump_matcher(file, matcher); + if (ret < 0) + return ret; + + list_for_each_entry(rule, &matcher->dbg_rule_list, dbg_node) { + ret = dr_dump_rule(file, rule); + if (ret < 0) + return ret; + } + + return 0; +} + +static int +dr_dump_table_rx_tx(struct seq_file *file, bool is_rx, + struct mlx5dr_table_rx_tx *table_rx_tx, + const u64 table_id) +{ + enum dr_dump_rec_type rec_type; + + rec_type = is_rx ? 
DR_DUMP_REC_TYPE_TABLE_RX : + DR_DUMP_REC_TYPE_TABLE_TX; + + seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id, + dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr)); + + return 0; +} + +static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table) +{ + struct mlx5dr_table_rx_tx *rx = &table->rx; + struct mlx5dr_table_rx_tx *tx = &table->tx; + int ret; + + seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE, + DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn), + table->table_type, table->level); + + if (rx->nic_dmn) { + ret = dr_dump_table_rx_tx(file, true, rx, + DR_DBG_PTR_TO_ID(table)); + if (ret < 0) + return ret; + } + + if (tx->nic_dmn) { + ret = dr_dump_table_rx_tx(file, false, tx, + DR_DBG_PTR_TO_ID(table)); + if (ret < 0) + return ret; + } + return 0; +} + +static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl) +{ + struct mlx5dr_matcher *matcher; + int ret; + + ret = dr_dump_table(file, tbl); + if (ret < 0) + return ret; + + list_for_each_entry(matcher, &tbl->matcher_list, list_node) { + ret = dr_dump_matcher_all(file, matcher); + if (ret < 0) + return ret; + } + return 0; +} + +static int +dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring, + const u64 domain_id) +{ + seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n", + DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring), + domain_id, ring->cq->mcq.cqn, ring->qp->qpn); + return 0; +} + +static int +dr_dump_domain_info_flex_parser(struct seq_file *file, + const char *flex_parser_name, + const u8 flex_parser_value, + const u64 domain_id) +{ + seq_printf(file, "%d,0x%llx,%s,0x%x\n", + DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id, + flex_parser_name, flex_parser_value); + return 0; +} + +static int +dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps, + const u64 domain_id) +{ + struct mlx5dr_cmd_vport_cap *vport_caps; + unsigned long i, vports_num; + + xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps) + ; /* count the number of vports in xarray */ + + seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n", + DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi, + caps->nic_rx_drop_address, caps->nic_tx_drop_address, + caps->flex_protocols, vports_num, caps->eswitch_manager); + + xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) { + vport_caps = xa_load(&caps->vports.vports_caps_xa, i); + + seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i, + vport_caps->vport_gvmi, vport_caps->icm_address_rx, + vport_caps->icm_address_tx); + } + return 0; +} + +static int +dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info, + const u64 domain_id) +{ + int ret; + + ret = dr_dump_domain_info_caps(file, &info->caps, domain_id); + if (ret < 0) + return ret; + + ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0", + info->caps.flex_parser_id_icmp_dw0, + domain_id); + if (ret < 0) + return ret; + + ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1", + info->caps.flex_parser_id_icmp_dw1, + domain_id); + if (ret < 0) + return ret; + + ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0", + info->caps.flex_parser_id_icmpv6_dw0, + domain_id); + if (ret < 0) + return ret; + + ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1", + info->caps.flex_parser_id_icmpv6_dw1, + domain_id); + if (ret < 0) + return ret; + + return 0; +} + +static int +dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn) 
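
A note on the vport counting in dr_dump_domain_info_caps() above: the empty-body xa_for_each() leaves vports_num holding the *index* of the last present entry, which equals the element count only while the xarray is populated densely from index 0. Since vports_caps_xa is keyed by vport number, a count that is also correct for sparse indices would look like the following sketch (illustrative only, not the driver's code):

	static unsigned long demo_count_vports(struct xarray *xa)
	{
		struct mlx5dr_cmd_vport_cap *cap;
		unsigned long index, count = 0;

		/* count present entries instead of keeping the last index */
		xa_for_each(xa, index, cap)
			count++;

		return count;
	}
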
+{ + u64 domain_id = DR_DBG_PTR_TO_ID(dmn); + int ret; + + seq_printf(file, "%d,0x%llx,%d,0x%x,%d,%s\n", DR_DUMP_REC_TYPE_DOMAIN, + domain_id, dmn->type, dmn->info.caps.gvmi, + dmn->info.supp_sw_steering, pci_name(dmn->mdev->pdev)); + + ret = dr_dump_domain_info(file, &dmn->info, domain_id); + if (ret < 0) + return ret; + + if (dmn->info.supp_sw_steering) { + ret = dr_dump_send_ring(file, dmn->send_ring, domain_id); + if (ret < 0) + return ret; + } + + return 0; +} + +static int dr_dump_domain_all(struct seq_file *file, struct mlx5dr_domain *dmn) +{ + struct mlx5dr_table *tbl; + int ret; + + mutex_lock(&dmn->dump_info.dbg_mutex); + mlx5dr_domain_lock(dmn); + + ret = dr_dump_domain(file, dmn); + if (ret < 0) + goto unlock_mutex; + + list_for_each_entry(tbl, &dmn->dbg_tbl_list, dbg_node) { + ret = dr_dump_table_all(file, tbl); + if (ret < 0) + break; + } + +unlock_mutex: + mlx5dr_domain_unlock(dmn); + mutex_unlock(&dmn->dump_info.dbg_mutex); + return ret; +} + +static int dr_dump_show(struct seq_file *file, void *priv) +{ + return dr_dump_domain_all(file, file->private); +} +DEFINE_SHOW_ATTRIBUTE(dr_dump); + +void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn) +{ + struct mlx5_core_dev *dev = dmn->mdev; + char file_name[128]; + + if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) { + mlx5_core_warn(dev, + "Steering dump is not supported for NIC RX/TX domains\n"); + return; + } + + dmn->dump_info.steering_debugfs = + debugfs_create_dir("steering", dev->priv.dbg_root); + dmn->dump_info.fdb_debugfs = + debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs); + + sprintf(file_name, "dmn_%p", dmn); + debugfs_create_file(file_name, 0444, dmn->dump_info.fdb_debugfs, + dmn, &dr_dump_fops); + + INIT_LIST_HEAD(&dmn->dbg_tbl_list); + mutex_init(&dmn->dump_info.dbg_mutex); +} + +void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn) +{ + debugfs_remove_recursive(dmn->dump_info.steering_debugfs); + mutex_destroy(&dmn->dump_info.dbg_mutex); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h new file mode 100644 index 000000000000..def6cf853eea --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +struct mlx5dr_dbg_dump_info { + struct mutex dbg_mutex; /* protect dbg lists */ + struct dentry *steering_debugfs; + struct dentry *fdb_debugfs; +}; + +void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn); +void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn); +void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl); +void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl); +void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule); +void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 49089cbe897c..5fa7f9d6d8b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 Mellanox Technologies.
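
For readers unfamiliar with the seq_file plumbing used by mlx5dr_dbg_init_dump() above, here is a self-contained sketch of the same debugfs pattern; every name in it is a placeholder, not part of the driver. DEFINE_SHOW_ATTRIBUTE(demo) expands demo_show() into demo_fops, a read-only set of file_operations backed by single_open(), and the data pointer handed to debugfs_create_file() comes back as file->private inside the show callback:

	#include <linux/debugfs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	static struct dentry *demo_dir;
	static char demo_tag[] = "demo domain";

	static int demo_show(struct seq_file *file, void *priv)
	{
		/* file->private is the data pointer from debugfs_create_file() */
		seq_printf(file, "%d,%s\n", 3000, (char *)file->private);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(demo);

	static int __init demo_init(void)
	{
		demo_dir = debugfs_create_dir("demo_steering", NULL);
		debugfs_create_file("dump", 0444, demo_dir, demo_tag, &demo_fops);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		debugfs_remove_recursive(demo_dir);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Reading the file then yields one CSV-style record, mirroring how each dr_dump_*() helper above contributes a rec_type,id,parent_id,... line to a single dump pass.
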
*/ #include <linux/mlx5/eswitch.h> +#include <linux/err.h> #include "dr_types.h" #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \ @@ -72,9 +73,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn) } dmn->uar = mlx5_get_uars_page(dmn->mdev); - if (!dmn->uar) { + if (IS_ERR(dmn->uar)) { mlx5dr_err(dmn, "Couldn't allocate UAR\n"); - ret = -ENOMEM; + ret = PTR_ERR(dmn->uar); goto clean_pd; } @@ -135,25 +136,14 @@ static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn, static int dr_domain_query_vport(struct mlx5dr_domain *dmn, u16 vport_number, + bool other_vport, struct mlx5dr_cmd_vport_cap *vport_caps) { - u16 cmd_vport = vport_number; - bool other_vport = true; int ret; - if (vport_number == MLX5_VPORT_UPLINK) { - dr_domain_fill_uplink_caps(dmn, vport_caps); - return 0; - } - - if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) { - other_vport = false; - cmd_vport = 0; - } - ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev, other_vport, - cmd_vport, + vport_number, &vport_caps->icm_address_rx, &vport_caps->icm_address_tx); if (ret) @@ -161,7 +151,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, ret = mlx5dr_cmd_query_gvmi(dmn->mdev, other_vport, - cmd_vport, + vport_number, &vport_caps->vport_gvmi); if (ret) return ret; @@ -174,11 +164,15 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn, static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn) { - return dr_domain_query_vport(dmn, - dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0, + return dr_domain_query_vport(dmn, 0, false, &dmn->info.caps.vports.esw_manager_caps); } +static void dr_domain_query_uplink(struct mlx5dr_domain *dmn) +{ + dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps); +} + static struct mlx5dr_cmd_vport_cap * dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) { @@ -190,7 +184,7 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) if (!vport_caps) return NULL; - ret = dr_domain_query_vport(dmn, vport, vport_caps); + ret = dr_domain_query_vport(dmn, vport, true, vport_caps); if (ret) { kvfree(vport_caps); return NULL; @@ -207,16 +201,26 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) return vport_caps; } +static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport) +{ + struct mlx5dr_cmd_caps *caps = &dmn->info.caps; + + return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) || + (!caps->is_ecpf && vport == 0); +} + struct mlx5dr_cmd_vport_cap * mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport) { struct mlx5dr_cmd_caps *caps = &dmn->info.caps; struct mlx5dr_cmd_vport_cap *vport_caps; - if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) || - (!caps->is_ecpf && vport == 0)) + if (dr_domain_is_esw_mgr_vport(dmn, vport)) return &caps->vports.esw_manager_caps; + if (vport == MLX5_VPORT_UPLINK) + return &caps->vports.uplink_caps; + vport_load: vport_caps = xa_load(&caps->vports.vports_caps_xa, vport); if (vport_caps) @@ -241,17 +245,6 @@ static void dr_domain_clear_vports(struct mlx5dr_domain *dmn) } } -static int dr_domain_query_uplink(struct mlx5dr_domain *dmn) -{ - struct mlx5dr_cmd_vport_cap *vport_caps; - - vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK); - if (!vport_caps) - return -EINVAL; - - return 0; -} - static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, struct mlx5dr_domain *dmn) { @@ -281,11 +274,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, goto free_vports_caps_xa; } - ret = dr_domain_query_uplink(dmn); - 
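
The first dr_domain.c hunk above is the classic IS_ERR() pitfall: mlx5_get_uars_page() reports failure as ERR_PTR(-errno) rather than NULL, so the old NULL test could never fire and the hard-coded -ENOMEM masked the real error. A minimal sketch of the corrected pattern (the wrapper name is invented for illustration):

	#include <linux/err.h>
	#include <linux/mlx5/driver.h>

	static int demo_take_uar(struct mlx5_core_dev *mdev,
				 struct mlx5_uars_page **uar)
	{
		*uar = mlx5_get_uars_page(mdev);
		if (IS_ERR(*uar))		/* ERR_PTR(-errno), never NULL */
			return PTR_ERR(*uar);	/* propagate the real errno */

		return 0;
	}
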
if (ret) { - mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret); - goto free_vports_caps_xa; - } + dr_domain_query_uplink(dmn); return 0; @@ -406,7 +395,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) } dr_domain_init_csum_recalc_fts(dmn); - + mlx5dr_dbg_init_dump(dmn); return dmn; uninit_caps: @@ -442,11 +431,12 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags) int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) { - if (refcount_read(&dmn->refcount) > 1) + if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1)) return -EBUSY; /* make sure resources are not used by the hardware */ mlx5dr_cmd_sync_steering(dmn->mdev); + mlx5dr_dbg_uninit_dump(dmn); dr_domain_uninit_csum_recalc_fts(dmn); dr_domain_uninit_resources(dmn); dr_domain_caps_uninit(dmn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 75c775bee351..e87cf498c77b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -141,6 +141,19 @@ static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3) } static bool +dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_parser_ok_bits_supp; +} + +static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc, + struct mlx5dr_domain *dmn) +{ + return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) && + misc->geneve_tlv_option_0_exist; +} + +static bool dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || @@ -359,7 +372,7 @@ static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask, static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED; + return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED; } static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask, @@ -368,6 +381,12 @@ static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask, return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) && dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps); } + +static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5) +{ + return misc5->tunnel_header_0 || misc5->tunnel_header_1; +} + int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, enum mlx5dr_ipv outer_ipv, @@ -424,6 +443,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) mask.misc4 = matcher->mask.misc4; + if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5) + mask.misc5 = matcher->mask.misc5; + ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria, &matcher->mask, NULL); if (ret) @@ -443,7 +465,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER | DR_MATCHER_CRITERIA_MISC | DR_MATCHER_CRITERIA_MISC2 | - DR_MATCHER_CRITERIA_MISC3)) { + DR_MATCHER_CRITERIA_MISC3 | + DR_MATCHER_CRITERIA_MISC5)) { inner = false; if (dr_mask_is_wqe_metadata_set(&mask.misc2)) @@ -511,6 +534,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++], &mask, &dmn->info.caps, inner, rx); + if 
(dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn)) + mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++], + &mask, &dmn->info.caps, + inner, rx); } else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) { if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn)) mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++], @@ -525,6 +552,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (dr_mask_is_tnl_gtpu(&mask, dmn)) mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++], &mask, inner, rx); + } else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) { + mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++], + &mask, inner, rx); } if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) @@ -653,10 +683,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, return 0; } -static int dr_matcher_connect(struct mlx5dr_domain *dmn, - struct mlx5dr_matcher_rx_tx *curr_nic_matcher, - struct mlx5dr_matcher_rx_tx *next_nic_matcher, - struct mlx5dr_matcher_rx_tx *prev_nic_matcher) +static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn, + struct mlx5dr_matcher_rx_tx *curr_nic_matcher, + struct mlx5dr_matcher_rx_tx *next_nic_matcher, + struct mlx5dr_matcher_rx_tx *prev_nic_matcher) { struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl; struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn; @@ -712,58 +742,50 @@ static int dr_matcher_connect(struct mlx5dr_domain *dmn, return 0; } -static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher) +int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn, + struct mlx5dr_matcher_rx_tx *nic_matcher) { - struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher; - struct mlx5dr_table *tbl = matcher->tbl; - struct mlx5dr_domain *dmn = tbl->dmn; + struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher; + struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl; bool first = true; int ret; - next_matcher = NULL; - list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) { - if (tmp_matcher->prio >= matcher->prio) { - next_matcher = tmp_matcher; + /* If the nic matcher is already on its parent nic table list, + * then it is already connected to the chain of nic matchers. + */ + if (!list_empty(&nic_matcher->list_node)) + return 0; + + next_nic_matcher = NULL; + list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) { + if (tmp_nic_matcher->prio >= nic_matcher->prio) { + next_nic_matcher = tmp_nic_matcher; break; } first = false; } - prev_matcher = NULL; - if (next_matcher && !first) - prev_matcher = list_prev_entry(next_matcher, matcher_list); + prev_nic_matcher = NULL; + if (next_nic_matcher && !first) + prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node); else if (!first) - prev_matcher = list_last_entry(&tbl->matcher_list, - struct mlx5dr_matcher, - matcher_list); - - if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || - dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) { - ret = dr_matcher_connect(dmn, &matcher->rx, - next_matcher ? &next_matcher->rx : NULL, - prev_matcher ? &prev_matcher->rx : NULL); - if (ret) - return ret; - } + prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list, + struct mlx5dr_matcher_rx_tx, + list_node); - if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || - dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) { - ret = dr_matcher_connect(dmn, &matcher->tx, - next_matcher ? &next_matcher->tx : NULL, - prev_matcher ? 
&prev_matcher->tx : NULL); - if (ret) - return ret; - } + ret = dr_nic_matcher_connect(dmn, nic_matcher, + next_nic_matcher, prev_nic_matcher); + if (ret) + return ret; - if (prev_matcher) - list_add(&matcher->matcher_list, &prev_matcher->matcher_list); - else if (next_matcher) - list_add_tail(&matcher->matcher_list, - &next_matcher->matcher_list); + if (prev_nic_matcher) + list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node); + else if (next_nic_matcher) + list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node); else - list_add(&matcher->matcher_list, &tbl->matcher_list); + list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list); - return 0; + return ret; } static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher) @@ -822,6 +844,9 @@ static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher, struct mlx5dr_domain *dmn = matcher->tbl->dmn; int ret; + nic_matcher->prio = matcher->prio; + INIT_LIST_HEAD(&nic_matcher->list_node); + ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher); if (ret) return ret; @@ -872,13 +897,12 @@ uninit_nic_rx: return ret; } -static int dr_matcher_init(struct mlx5dr_matcher *matcher, - struct mlx5dr_match_parameters *mask) +static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher, + struct mlx5dr_match_parameters *mask) { + struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_match_parameters consumed_mask; - struct mlx5dr_table *tbl = matcher->tbl; - struct mlx5dr_domain *dmn = tbl->dmn; - int i, ret; + int i, ret = 0; if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) { mlx5dr_err(dmn, "Invalid match criteria attribute\n"); @@ -898,10 +922,36 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, consumed_mask.match_sz = mask->match_sz; memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz); mlx5dr_ste_copy_param(matcher->match_criteria, - &matcher->mask, &consumed_mask, - true); + &matcher->mask, &consumed_mask, true); + + /* Check that all mask data was consumed */ + for (i = 0; i < consumed_mask.match_sz; i++) { + if (!((u8 *)consumed_mask.match_buf)[i]) + continue; + + mlx5dr_dbg(dmn, + "Match param mask contains unsupported parameters\n"); + ret = -EOPNOTSUPP; + break; + } + + kfree(consumed_mask.match_buf); } + return ret; +} + +static int dr_matcher_init(struct mlx5dr_matcher *matcher, + struct mlx5dr_match_parameters *mask) +{ + struct mlx5dr_table *tbl = matcher->tbl; + struct mlx5dr_domain *dmn = tbl->dmn; + int ret; + + ret = dr_matcher_copy_param(matcher, mask); + if (ret) + return ret; + switch (dmn->type) { case MLX5DR_DOMAIN_TYPE_NIC_RX: matcher->rx.nic_tbl = &tbl->rx; @@ -919,24 +969,25 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, default: WARN_ON(true); ret = -EINVAL; - goto free_consumed_mask; - } - - /* Check that all mask data was consumed */ - for (i = 0; i < consumed_mask.match_sz; i++) { - if (consumed_mask.match_buf[i]) { - mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n"); - ret = -EOPNOTSUPP; - goto free_consumed_mask; - } } - ret = 0; -free_consumed_mask: - kfree(consumed_mask.match_buf); return ret; } +static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher) +{ + mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex); + list_add(&matcher->list_node, &matcher->tbl->matcher_list); + mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex); +} + +static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher) +{ + mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex); + 
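
The consumed-mask check in dr_matcher_copy_param() above open-codes a byte-by-byte scan for leftover bits after the STE builders have cleared everything they handled. An equivalent, more idiomatic form uses memchr_inv(), which returns the first byte that differs from the given value, or NULL when the whole region matches; this is a sketch, not what the driver ships:

	#include <linux/errno.h>
	#include <linux/string.h>

	/* -EOPNOTSUPP if any mask byte survived the STE builders */
	static int demo_verify_mask_consumed(const void *mask, size_t sz)
	{
		return memchr_inv(mask, 0, sz) ? -EOPNOTSUPP : 0;
	}
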
list_del(&matcher->list_node); + mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex); +} + struct mlx5dr_matcher * mlx5dr_matcher_create(struct mlx5dr_table *tbl, u32 priority, @@ -956,7 +1007,8 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl, matcher->prio = priority; matcher->match_criteria = match_criteria_enable; refcount_set(&matcher->refcount, 1); - INIT_LIST_HEAD(&matcher->matcher_list); + INIT_LIST_HEAD(&matcher->list_node); + INIT_LIST_HEAD(&matcher->dbg_rule_list); mlx5dr_domain_lock(tbl->dmn); @@ -964,16 +1016,12 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl, if (ret) goto free_matcher; - ret = dr_matcher_add_to_tbl(matcher); - if (ret) - goto matcher_uninit; + dr_matcher_add_to_dbg_list(matcher); mlx5dr_domain_unlock(tbl->dmn); return matcher; -matcher_uninit: - dr_matcher_uninit(matcher); free_matcher: mlx5dr_domain_unlock(tbl->dmn); kfree(matcher); @@ -982,10 +1030,10 @@ dec_ref: return NULL; } -static int dr_matcher_disconnect(struct mlx5dr_domain *dmn, - struct mlx5dr_table_rx_tx *nic_tbl, - struct mlx5dr_matcher_rx_tx *next_nic_matcher, - struct mlx5dr_matcher_rx_tx *prev_nic_matcher) +static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn, + struct mlx5dr_table_rx_tx *nic_tbl, + struct mlx5dr_matcher_rx_tx *next_nic_matcher, + struct mlx5dr_matcher_rx_tx *prev_nic_matcher) { struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn; struct mlx5dr_htbl_connect_info info; @@ -1012,43 +1060,34 @@ static int dr_matcher_disconnect(struct mlx5dr_domain *dmn, &info, true); } -static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher) +int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn, + struct mlx5dr_matcher_rx_tx *nic_matcher) { - struct mlx5dr_matcher *prev_matcher, *next_matcher; - struct mlx5dr_table *tbl = matcher->tbl; - struct mlx5dr_domain *dmn = tbl->dmn; - int ret = 0; + struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher; + struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl; + int ret; - if (list_is_last(&matcher->matcher_list, &tbl->matcher_list)) - next_matcher = NULL; - else - next_matcher = list_next_entry(matcher, matcher_list); + /* If the nic matcher is not on its parent nic table list, + * then it is detached - no need to disconnect it. + */ + if (list_empty(&nic_matcher->list_node)) + return 0; - if (matcher->matcher_list.prev == &tbl->matcher_list) - prev_matcher = NULL; + if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list)) + next_nic_matcher = NULL; else - prev_matcher = list_prev_entry(matcher, matcher_list); - - if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || - dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) { - ret = dr_matcher_disconnect(dmn, &tbl->rx, - next_matcher ? &next_matcher->rx : NULL, - prev_matcher ? &prev_matcher->rx : NULL); - if (ret) - return ret; - } + next_nic_matcher = list_next_entry(nic_matcher, list_node); - if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB || - dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) { - ret = dr_matcher_disconnect(dmn, &tbl->tx, - next_matcher ? &next_matcher->tx : NULL, - prev_matcher ? 
&prev_matcher->tx : NULL); - if (ret) - return ret; - } + if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list) + prev_nic_matcher = NULL; + else + prev_nic_matcher = list_prev_entry(nic_matcher, list_node); - list_del(&matcher->matcher_list); + ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher); + if (ret) + return ret; + list_del_init(&nic_matcher->list_node); return 0; } @@ -1056,12 +1095,12 @@ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { struct mlx5dr_table *tbl = matcher->tbl; - if (refcount_read(&matcher->refcount) > 1) + if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1)) return -EBUSY; mlx5dr_domain_lock(tbl->dmn); - dr_matcher_remove_from_tbl(matcher); + dr_matcher_remove_from_dbg_list(matcher); dr_matcher_uninit(matcher); refcount_dec(&matcher->tbl->refcount); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 6a390e981b09..b4374578425b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -5,11 +5,6 @@ #define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES) -struct mlx5dr_rule_action_member { - struct mlx5dr_action *action; - struct list_head list; -}; - static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste *new_last_ste, struct list_head *miss_list, @@ -979,14 +974,36 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, return false; } } + + if (match_criteria & DR_MATCHER_CRITERIA_MISC5) { + s_idx = offsetof(struct mlx5dr_match_param, misc5); + e_idx = min(s_idx + sizeof(param->misc5), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { + mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contains a value not specified by mask\n"); + return false; + } + } return true; } static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule) { + /* Check if this nic rule was actually created, or was it skipped + * and only the other type of the RX/TX nic rule was created. 
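
The lazy attach/detach scheme above leans on a small list API detail worth spelling out: list_del_init() re-initializes the removed node, so list_empty() applied to the node itself answers "is this matcher currently linked?", which is what makes mlx5dr_matcher_add_to_tbl_nic() and its removal counterpart safe to call repeatedly. A reduced sketch of the same idea (types invented for illustration):

	#include <linux/list.h>
	#include <linux/types.h>

	struct demo_nic_matcher {
		struct list_head list_node;	/* empty <=> detached */
		u32 rules;
	};

	static void demo_put_rule(struct demo_nic_matcher *m)
	{
		/* detach from the table once the last rule is gone */
		if (--m->rules == 0 && !list_empty(&m->list_node))
			list_del_init(&m->list_node);
	}
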
+ */ + if (!nic_rule->last_rule_ste) + return 0; + mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn); dr_rule_clean_rule_members(rule, nic_rule); + + nic_rule->nic_matcher->rules--; + if (!nic_rule->nic_matcher->rules) + mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn, + nic_rule->nic_matcher); + mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn); return 0; @@ -1003,6 +1020,8 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule) { struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn; + mlx5dr_dbg_rule_del(rule); + switch (dmn->type) { case MLX5DR_DOMAIN_TYPE_NIC_RX: dr_rule_destroy_rule_nic(rule, &rule->rx); @@ -1091,24 +1110,28 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, mlx5dr_domain_nic_lock(nic_dmn); + ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher); + if (ret) + goto free_hw_ste; + ret = mlx5dr_matcher_select_builders(matcher, nic_matcher, dr_rule_get_ipv(¶m->outer), dr_rule_get_ipv(¶m->inner)); if (ret) - goto free_hw_ste; + goto remove_from_nic_tbl; /* Set the tag values inside the ste array */ ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr); if (ret) - goto free_hw_ste; + goto remove_from_nic_tbl; /* Set the actions values/addresses inside the ste array */ ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions, num_actions, hw_ste_arr, &new_hw_ste_arr_sz); if (ret) - goto free_hw_ste; + goto remove_from_nic_tbl; cur_htbl = nic_matcher->s_htbl; @@ -1155,6 +1178,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, if (htbl) mlx5dr_htbl_put(htbl); + nic_matcher->rules++; + mlx5dr_domain_nic_unlock(nic_dmn); kfree(hw_ste_arr); @@ -1168,6 +1193,10 @@ free_rule: list_del(&ste_info->send_list); kfree(ste_info); } + +remove_from_nic_tbl: + mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher); + free_hw_ste: mlx5dr_domain_nic_unlock(nic_dmn); kfree(hw_ste_arr); @@ -1257,6 +1286,8 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher, if (ret) goto remove_action_members; + INIT_LIST_HEAD(&rule->dbg_node); + mlx5dr_dbg_rule_add(rule); return rule; remove_action_members: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 219a5474a8a4..7e61742e58a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -719,6 +719,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bo spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr); spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr); + spec->geneve_tlv_option_0_exist = + IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr); spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr); spec->outer_ipv6_flow_label = @@ -880,6 +882,26 @@ static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr); } +static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr) +{ + spec->macsec_tag_0 = + IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr); + spec->macsec_tag_1 = + IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr); + spec->macsec_tag_2 = + IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr); + spec->macsec_tag_3 = + IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr); + spec->tunnel_header_0 = + IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, 
clr); + spec->tunnel_header_1 = + IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr); + spec->tunnel_header_2 = + IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr); + spec->tunnel_header_3 = + IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr); +} + void mlx5dr_ste_copy_param(u8 match_criteria, struct mlx5dr_match_param *set_param, struct mlx5dr_match_parameters *mask, @@ -966,6 +988,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria, } dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr); } + + param_location += sizeof(struct mlx5dr_match_misc4); + + if (match_criteria & DR_MATCHER_CRITERIA_MISC5) { + if (mask->match_sz < param_location + + sizeof(struct mlx5dr_match_misc5)) { + memcpy(tail_param, data + param_location, + mask->match_sz - param_location); + buff = tail_param; + } else { + buff = data + param_location; + } + dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr); + } } void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx, @@ -1180,6 +1216,21 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx, ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask); } +void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) +{ + if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init) + return; + + sb->rx = rx; + sb->caps = caps; + sb->inner = inner; + ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask); +} + void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -1269,6 +1320,16 @@ void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, ste_ctx->build_flex_parser_1_init(sb, mask); } +void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) +{ + sb->rx = rx; + sb->inner = inner; + ste_ctx->build_tnl_header_0_1_init(sb, mask); +} + static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = { [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0, [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index 2d52d065dc8b..ca8fa32b8680 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -135,12 +135,14 @@ struct mlx5dr_ste_ctx { void DR_STE_CTX_BUILDER(tnl_vxlan_gpe); void DR_STE_CTX_BUILDER(tnl_geneve); void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt); + void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt_exist); void DR_STE_CTX_BUILDER(register_0); void DR_STE_CTX_BUILDER(register_1); void DR_STE_CTX_BUILDER(src_gvmi_qpn); void DR_STE_CTX_BUILDER(flex_parser_0); void DR_STE_CTX_BUILDER(flex_parser_1); void DR_STE_CTX_BUILDER(tnl_gtpu); + void DR_STE_CTX_BUILDER(tnl_header_0_1); void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0); void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index b0649c2877dd..2d62950f7a29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -80,6 +80,7 @@ enum { DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18, DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f, DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 
= 0x30, + DR_STE_V0_LU_TYPE_TUNNEL_HEADER = 0x34, DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE, }; @@ -1704,7 +1705,7 @@ static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id, u32 id = *misc4_field_id; u8 *parser_ptr; - if (parser_is_used[id]) + if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id]) return; parser_is_used[id] = true; @@ -1875,6 +1876,27 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag; } +static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + struct mlx5dr_match_misc5 *misc5 = &value->misc5; + + DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0); + DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1); + + return 0; +} + +static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER; + dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag; +} + struct mlx5dr_ste_ctx ste_ctx_v0 = { /* Builders */ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init, @@ -1903,6 +1925,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = { .build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init, .build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init, .build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init, + .build_tnl_header_0_1_init = &dr_ste_v0_build_tnl_header_0_1_init, .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init, .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index cb9cf67b0a02..6ca06800f1d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -47,6 +47,7 @@ enum { DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f, DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f, DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110, + DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011, DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111, DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112, DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113, @@ -1713,6 +1714,27 @@ dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag; } +static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + struct mlx5dr_match_misc5 *misc5 = &value->misc5; + + DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0); + DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1); + + return 0; +} + +static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER; + dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag; +} + static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, u8 *tag) @@ -1833,7 +1855,7 @@ static 
void dr_ste_v1_set_flex_parser(u32 *misc4_field_id, u32 id = *misc4_field_id; u8 *parser_ptr; - if (parser_is_used[id]) + if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id]) return; parser_is_used[id] = true; @@ -1921,6 +1943,32 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag; } +static int +dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value, + struct mlx5dr_ste_build *sb, + uint8_t *tag) +{ + u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0; + struct mlx5dr_match_misc *misc = &value->misc; + + if (misc->geneve_tlv_option_0_exist) { + MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id); + misc->geneve_tlv_option_0_exist = 0; + } + + return 0; +} + +static void +dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) +{ + sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK; + dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask); + sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); + sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag; +} + static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, u8 *tag) @@ -2020,12 +2068,14 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init, + .build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init, .build_register_0_init = &dr_ste_v1_build_register_0_init, .build_register_1_init = &dr_ste_v1_build_register_1_init, .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, + .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init, .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init, .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c index 30ae3cda6d2e..8ca110643cc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c @@ -3,69 +3,66 @@ #include "dr_types.h" -int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, - struct mlx5dr_action *action) +static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn, + struct mlx5dr_table_rx_tx *nic_tbl, + struct mlx5dr_action *action) { - struct mlx5dr_matcher *last_matcher = NULL; + struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL; struct mlx5dr_htbl_connect_info info; struct mlx5dr_ste_htbl *last_htbl; int ret; + if (!list_empty(&nic_tbl->nic_matcher_list)) + last_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list, + struct mlx5dr_matcher_rx_tx, + list_node); + + if (last_nic_matcher) + last_htbl = last_nic_matcher->e_anchor; + else + last_htbl = nic_tbl->s_anchor; + + if (action) + nic_tbl->default_icm_addr = + nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ? 
+ action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr : + action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr; + else + nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr; + + info.type = CONNECT_MISS; + info.miss_icm_addr = nic_tbl->default_icm_addr; + + ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_tbl->nic_dmn, + last_htbl, &info, true); + if (ret) + mlx5dr_dbg(dmn, "Failed to set NIC RX/TX miss action, ret %d\n", ret); + + return ret; +} + +int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, + struct mlx5dr_action *action) +{ + int ret; + if (action && action->action_type != DR_ACTION_TYP_FT) return -EOPNOTSUPP; mlx5dr_domain_lock(tbl->dmn); - if (!list_empty(&tbl->matcher_list)) - last_matcher = list_last_entry(&tbl->matcher_list, - struct mlx5dr_matcher, - matcher_list); - if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX || tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) { - if (last_matcher) - last_htbl = last_matcher->rx.e_anchor; - else - last_htbl = tbl->rx.s_anchor; - - tbl->rx.default_icm_addr = action ? - action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr : - tbl->rx.nic_dmn->default_icm_addr; - - info.type = CONNECT_MISS; - info.miss_icm_addr = tbl->rx.default_icm_addr; - - ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn, - tbl->rx.nic_dmn, - last_htbl, - &info, true); - if (ret) { - mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret); + ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action); + if (ret) goto out; - } } if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX || tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) { - if (last_matcher) - last_htbl = last_matcher->tx.e_anchor; - else - last_htbl = tbl->tx.s_anchor; - - tbl->tx.default_icm_addr = action ? - action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr : - tbl->tx.nic_dmn->default_icm_addr; - - info.type = CONNECT_MISS; - info.miss_icm_addr = tbl->tx.default_icm_addr; - - ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn, - tbl->tx.nic_dmn, - last_htbl, &info, true); - if (ret) { - mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret); + ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->tx, action); + if (ret) goto out; - } } /* Release old action */ @@ -122,6 +119,8 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn, struct mlx5dr_htbl_connect_info info; int ret; + INIT_LIST_HEAD(&nic_tbl->nic_matcher_list); + nic_tbl->default_icm_addr = nic_dmn->default_icm_addr; nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, @@ -266,6 +265,8 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u if (ret) goto uninit_tbl; + INIT_LIST_HEAD(&tbl->dbg_node); + mlx5dr_dbg_tbl_add(tbl); return tbl; uninit_tbl: @@ -281,9 +282,10 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl) { int ret; - if (refcount_read(&tbl->refcount) > 1) + if (WARN_ON_ONCE(refcount_read(&tbl->refcount) > 1)) return -EBUSY; + mlx5dr_dbg_tbl_del(tbl); ret = dr_table_destroy_sw_owned_tbl(tbl); if (ret) return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 3028b776da00..1b3d484b99be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -11,6 +11,7 @@ #include "lib/mlx5.h" #include "mlx5_ifc_dr.h" #include "mlx5dr.h" +#include "dr_dbg.h" #define DR_RULE_MAX_STES 18 #define DR_ACTION_MAX_STES 5 @@ -104,7 +105,8 @@ enum mlx5dr_matcher_criteria { DR_MATCHER_CRITERIA_MISC2 = 1 << 3, 
DR_MATCHER_CRITERIA_MISC3 = 1 << 4, DR_MATCHER_CRITERIA_MISC4 = 1 << 5, - DR_MATCHER_CRITERIA_MAX = 1 << 6, + DR_MATCHER_CRITERIA_MISC5 = 1 << 6, + DR_MATCHER_CRITERIA_MAX = 1 << 7, }; enum mlx5dr_action_type { @@ -440,6 +442,11 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_match_param *mask, struct mlx5dr_cmd_caps *caps, bool inner, bool rx); +void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -454,6 +461,10 @@ void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_match_param *mask, struct mlx5dr_cmd_caps *caps, bool inner, bool rx); +void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx, + struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, @@ -494,57 +505,64 @@ struct mlx5dr_match_spec { /* Incoming packet Ethertype - this is the Ethertype * following the last VLAN tag of the packet */ - u32 ethertype:16; u32 smac_15_0:16; /* Source MAC address of incoming packet */ + u32 ethertype:16; + u32 dmac_47_16; /* Destination MAC address of incoming packet */ - /* VLAN ID of first VLAN tag in the incoming packet. + + u32 dmac_15_0:16; /* Destination MAC address of incoming packet */ + /* Priority of first VLAN tag in the incoming packet. * Valid only when cvlan_tag==1 or svlan_tag==1 */ - u32 first_vid:12; + u32 first_prio:3; /* CFI bit of first VLAN tag in the incoming packet. * Valid only when cvlan_tag==1 or svlan_tag==1 */ u32 first_cfi:1; - /* Priority of first VLAN tag in the incoming packet. + /* VLAN ID of first VLAN tag in the incoming packet. * Valid only when cvlan_tag==1 or svlan_tag==1 */ - u32 first_prio:3; - u32 dmac_15_0:16; /* Destination MAC address of incoming packet */ - /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK; - * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS + u32 first_vid:12; + + u32 ip_protocol:8; /* IP protocol */ + /* Differentiated Services Code Point derived from + * Traffic Class/TOS field of IPv6/v4 */ - u32 tcp_flags:9; - u32 ip_version:4; /* IP version */ - u32 frag:1; /* Packet is an IP fragment */ - /* The first vlan in the packet is s-vlan (0x8a88). - * cvlan_tag and svlan_tag cannot be set together + u32 ip_dscp:6; + /* Explicit Congestion Notification derived from + * Traffic Class/TOS field of IPv6/v4 */ - u32 svlan_tag:1; + u32 ip_ecn:2; /* The first vlan in the packet is c-vlan (0x8100). * cvlan_tag and svlan_tag cannot be set together */ u32 cvlan_tag:1; - /* Explicit Congestion Notification derived from - * Traffic Class/TOS field of IPv6/v4 + /* The first vlan in the packet is s-vlan (0x8a88). + * cvlan_tag and svlan_tag cannot be set together */ - u32 ip_ecn:2; - /* Differentiated Services Code Point derived from - * Traffic Class/TOS field of IPv6/v4 + u32 svlan_tag:1; + u32 frag:1; /* Packet is an IP fragment */ + u32 ip_version:4; /* IP version */ + /* TCP flags. 
;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK; + * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS */ - u32 ip_dscp:6; - u32 ip_protocol:8; /* IP protocol */ + u32 tcp_flags:9; + + /* TCP source port.;tcp and udp sport/dport are mutually exclusive */ + u32 tcp_sport:16; /* TCP destination port. * tcp and udp sport/dport are mutually exclusive */ u32 tcp_dport:16; - /* TCP source port.;tcp and udp sport/dport are mutually exclusive */ - u32 tcp_sport:16; + + u32 reserved_auto1:24; u32 ttl_hoplimit:8; - u32 reserved:24; - /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */ - u32 udp_dport:16; + /* UDP source port.;tcp and udp sport/dport are mutually exclusive */ u32 udp_sport:16; + /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */ + u32 udp_dport:16; + /* IPv6 source address of incoming packets * For IPv4 address use bits 31:0 (rest of the bits are reserved) * This field should be qualified by an appropriate ethertype @@ -588,96 +606,114 @@ struct mlx5dr_match_spec { }; struct mlx5dr_match_misc { - u32 source_sqn:24; /* Source SQN */ - u32 source_vhca_port:4; - /* used with GRE, sequence number exist when gre_s_present == 1 */ - u32 gre_s_present:1; - /* used with GRE, key exist when gre_k_present == 1 */ - u32 gre_k_present:1; - u32 reserved_auto1:1; /* used with GRE, checksum exist when gre_c_present == 1 */ u32 gre_c_present:1; + u32 reserved_auto1:1; + /* used with GRE, key exist when gre_k_present == 1 */ + u32 gre_k_present:1; + /* used with GRE, sequence number exist when gre_s_present == 1 */ + u32 gre_s_present:1; + u32 source_vhca_port:4; + u32 source_sqn:24; /* Source SQN */ + + u32 source_eswitch_owner_vhca_id:16; /* Source port.;0xffff determines wire port */ u32 source_port:16; - u32 source_eswitch_owner_vhca_id:16; - /* VLAN ID of first VLAN tag the inner header of the incoming packet. - * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 - */ - u32 inner_second_vid:12; - /* CFI bit of first VLAN tag in the inner header of the incoming packet. - * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 - */ - u32 inner_second_cfi:1; - /* Priority of second VLAN tag in the inner header of the incoming packet. - * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 - */ - u32 inner_second_prio:3; - /* VLAN ID of first VLAN tag the outer header of the incoming packet. + + /* Priority of second VLAN tag in the outer header of the incoming packet. * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1 */ - u32 outer_second_vid:12; + u32 outer_second_prio:3; /* CFI bit of first VLAN tag in the outer header of the incoming packet. * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1 */ u32 outer_second_cfi:1; - /* Priority of second VLAN tag in the outer header of the incoming packet. + /* VLAN ID of first VLAN tag the outer header of the incoming packet. * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1 */ - u32 outer_second_prio:3; - u32 gre_protocol:16; /* GRE Protocol (outer) */ - u32 reserved_auto3:12; - /* The second vlan in the inner header of the packet is s-vlan (0x8a88). - * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together + u32 outer_second_vid:12; + /* Priority of second VLAN tag in the inner header of the incoming packet. 
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 */ - u32 inner_second_svlan_tag:1; - /* The second vlan in the outer header of the packet is s-vlan (0x8a88). + u32 inner_second_prio:3; + /* CFI bit of first VLAN tag in the inner header of the incoming packet. + * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 + */ + u32 inner_second_cfi:1; + /* VLAN ID of first VLAN tag the inner header of the incoming packet. + * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1 + */ + u32 inner_second_vid:12; + + u32 outer_second_cvlan_tag:1; + u32 inner_second_cvlan_tag:1; + /* The second vlan in the outer header of the packet is c-vlan (0x8100). * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together */ u32 outer_second_svlan_tag:1; /* The second vlan in the inner header of the packet is c-vlan (0x8100). * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together */ - u32 inner_second_cvlan_tag:1; - /* The second vlan in the outer header of the packet is c-vlan (0x8100). + u32 inner_second_svlan_tag:1; + /* The second vlan in the outer header of the packet is s-vlan (0x8a88). * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together */ - u32 outer_second_cvlan_tag:1; - u32 gre_key_l:8; /* GRE Key [7:0] (outer) */ + u32 reserved_auto2:12; + /* The second vlan in the inner header of the packet is s-vlan (0x8a88). + * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together + */ + u32 gre_protocol:16; /* GRE Protocol (outer) */ + u32 gre_key_h:24; /* GRE Key[31:8] (outer) */ - u32 reserved_auto4:8; + u32 gre_key_l:8; /* GRE Key [7:0] (outer) */ + u32 vxlan_vni:24; /* VXLAN VNI (outer) */ - u32 geneve_oam:1; /* GENEVE OAM field (outer) */ - u32 reserved_auto5:7; + u32 reserved_auto3:8; + u32 geneve_vni:24; /* GENEVE VNI field (outer) */ + u32 reserved_auto4:6; + u32 geneve_tlv_option_0_exist:1; + u32 geneve_oam:1; /* GENEVE OAM field (outer) */ + + u32 reserved_auto5:12; u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */ + u32 reserved_auto6:12; u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */ - u32 reserved_auto7:12; - u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */ + + u32 reserved_auto7:10; u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */ - u32 reserved_auto8:10; + u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */ + + u32 reserved_auto8:8; u32 bth_dst_qp:24; /* Destination QP in BTH header */ - u32 reserved_auto9:8; - u8 reserved_auto10[20]; + + u32 reserved_auto9; + u32 outer_esp_spi; + u32 reserved_auto10[3]; }; struct mlx5dr_match_misc2 { - u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */ - u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */ - u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */ u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */ - u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */ - u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */ - u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */ + u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */ + u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */ + u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */ + u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */ - u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */ - u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */ - u32 outer_first_mpls_over_gre_exp:3; /* 
last MPLS EXP (outer) */ + u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */ + u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */ + u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */ + u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */ - u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */ - u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */ - u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */ + u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */ + u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */ + u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */ + u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */ + u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */ + u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */ + u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */ + u32 metadata_reg_c_7; /* metadata_reg_c_7 */ u32 metadata_reg_c_6; /* metadata_reg_c_6 */ u32 metadata_reg_c_5; /* metadata_reg_c_5 */ @@ -687,7 +723,7 @@ struct mlx5dr_match_misc2 { u32 metadata_reg_c_1; /* metadata_reg_c_1 */ u32 metadata_reg_c_0; /* metadata_reg_c_0 */ u32 metadata_reg_a; /* metadata_reg_a */ - u8 reserved_auto2[12]; + u32 reserved_auto1[3]; }; struct mlx5dr_match_misc3 { @@ -695,24 +731,34 @@ struct mlx5dr_match_misc3 { u32 outer_tcp_seq_num; u32 inner_tcp_ack_num; u32 outer_tcp_ack_num; - u32 outer_vxlan_gpe_vni:24; + u32 reserved_auto1:8; - u32 reserved_auto2:16; - u32 outer_vxlan_gpe_flags:8; + u32 outer_vxlan_gpe_vni:24; + u32 outer_vxlan_gpe_next_protocol:8; + u32 outer_vxlan_gpe_flags:8; + u32 reserved_auto2:16; + u32 icmpv4_header_data; u32 icmpv6_header_data; - u8 icmpv6_code; - u8 icmpv6_type; - u8 icmpv4_code; + u8 icmpv4_type; + u8 icmpv4_code; + u8 icmpv6_type; + u8 icmpv6_code; + u32 geneve_tlv_option_0_data; - u8 gtpu_msg_flags; - u8 gtpu_msg_type; + u32 gtpu_teid; + + u8 gtpu_msg_type; + u8 gtpu_msg_flags; + u32 reserved_auto3:16; + u32 gtpu_dw_2; u32 gtpu_first_ext_dw_0; u32 gtpu_dw_0; + u32 reserved_auto4; }; struct mlx5dr_match_misc4 { @@ -724,6 +770,18 @@ struct mlx5dr_match_misc4 { u32 prog_sample_field_id_2; u32 prog_sample_field_value_3; u32 prog_sample_field_id_3; + u32 reserved_auto1[8]; +}; + +struct mlx5dr_match_misc5 { + u32 macsec_tag_0; + u32 macsec_tag_1; + u32 macsec_tag_2; + u32 macsec_tag_3; + u32 tunnel_header_0; + u32 tunnel_header_1; + u32 tunnel_header_2; + u32 tunnel_header_3; }; struct mlx5dr_match_param { @@ -733,6 +791,7 @@ struct mlx5dr_match_param { struct mlx5dr_match_misc2 misc2; struct mlx5dr_match_misc3 misc3; struct mlx5dr_match_misc4 misc4; + struct mlx5dr_match_misc5 misc5; }; #define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ @@ -764,6 +823,7 @@ struct mlx5dr_roce_cap { struct mlx5dr_vports { struct mlx5dr_cmd_vport_cap esw_manager_caps; + struct mlx5dr_cmd_vport_cap uplink_caps; struct xarray vports_caps_xa; }; @@ -788,6 +848,7 @@ struct mlx5dr_cmd_caps { u8 flex_parser_id_gtpu_teid; u8 flex_parser_id_gtpu_dw_2; u8 flex_parser_id_gtpu_first_ext_dw_0; + u8 flex_parser_ok_bits_supp; u8 max_ft_level; u16 roce_min_src_udp; u8 sw_format_ver; @@ -842,12 +903,15 @@ struct mlx5dr_domain { struct mlx5dr_domain_info info; struct xarray csum_fts_xa; struct mlx5dr_ste_ctx *ste_ctx; + struct list_head dbg_tbl_list; + struct mlx5dr_dbg_dump_info dump_info; }; struct mlx5dr_table_rx_tx { struct mlx5dr_ste_htbl *s_anchor; struct mlx5dr_domain_rx_tx *nic_dmn; u64 
default_icm_addr; + struct list_head nic_matcher_list; }; struct mlx5dr_table { @@ -861,6 +925,7 @@ struct mlx5dr_table { struct list_head matcher_list; struct mlx5dr_action *miss_action; refcount_t refcount; + struct list_head dbg_node; }; struct mlx5dr_matcher_rx_tx { @@ -874,18 +939,21 @@ struct mlx5dr_matcher_rx_tx { u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX]; u64 default_icm_addr; struct mlx5dr_table_rx_tx *nic_tbl; + u32 prio; + struct list_head list_node; + u32 rules; }; struct mlx5dr_matcher { struct mlx5dr_table *tbl; struct mlx5dr_matcher_rx_tx rx; struct mlx5dr_matcher_rx_tx tx; - struct list_head matcher_list; + struct list_head list_node; /* Used for both matchers and dbg managing */ u32 prio; struct mlx5dr_match_param mask; u8 match_criteria; refcount_t refcount; - struct mlx5dv_flow_matcher *dv_matcher; + struct list_head dbg_rule_list; }; struct mlx5dr_ste_action_modify_field { @@ -957,6 +1025,11 @@ struct mlx5dr_action_flow_tag { u32 flow_tag; }; +struct mlx5dr_rule_action_member { + struct mlx5dr_action *action; + struct list_head list; +}; + struct mlx5dr_action { enum mlx5dr_action_type action_type; refcount_t refcount; @@ -997,6 +1070,7 @@ struct mlx5dr_rule { struct mlx5dr_rule_rx_tx rx; struct mlx5dr_rule_rx_tx tx; struct list_head rule_actions_list; + struct list_head dbg_node; u32 flow_source; }; @@ -1049,6 +1123,11 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn) mlx5dr_domain_nic_unlock(&dmn->info.rx); } +int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn, + struct mlx5dr_matcher_rx_tx *nic_matcher); +int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn, + struct mlx5dr_matcher_rx_tx *nic_matcher); + int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, enum mlx5dr_ipv outer_ipv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 2632d5ae9bc0..a476da2424f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2019 Mellanox Technologies */ +#include <linux/mlx5/vport.h> #include "mlx5_core.h" #include "fs_core.h" #include "fs_cmd.h" @@ -194,6 +195,15 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain, dest_attr->vport.vhca_id); } +static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain, + struct mlx5_flow_rule *dst) +{ + struct mlx5_flow_destination *dest_attr = &dst->dest_attr; + + return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1, + dest_attr->vport.vhca_id); +} + static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain, struct mlx5_flow_rule *dst) { @@ -218,7 +228,8 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst) { - return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT || + dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID; } @@ -411,8 +422,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, fs_dr_actions[fs_dr_num_actions++] = tmp_action; term_actions[num_term_actions++].dest = tmp_action; break; + case MLX5_FLOW_DESTINATION_TYPE_UPLINK: case 
MLX5_FLOW_DESTINATION_TYPE_VPORT: - tmp_action = create_vport_action(domain, dst); + tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ? + create_vport_action(domain, dst) : + create_uplink_action(domain, dst); if (!tmp_action) { err = -ENOMEM; goto free_actions; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h index d2a937f69784..9604b2091358 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -447,6 +447,14 @@ struct mlx5_ifc_ste_flex_parser_1_bits { u8 flex_parser_4[0x20]; }; +struct mlx5_ifc_ste_flex_parser_ok_bits { + u8 flex_parser_3[0x20]; + u8 flex_parser_2[0x20]; + u8 flex_parsers_ok[0x8]; + u8 reserved_at_48[0x18]; + u8 flex_parser_0[0x20]; +}; + struct mlx5_ifc_ste_flex_parser_tnl_bits { u8 flex_parser_tunneling_header_63_32[0x20]; @@ -490,6 +498,14 @@ struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_ste_tunnel_header_bits { + u8 tunnel_header_0[0x20]; + + u8 tunnel_header_1[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_ste_general_purpose_bits { u8 general_purpose_lookup_field[0x20]; diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c index 92b798f8e73a..ceeb7f4c3f6c 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c @@ -33,8 +33,11 @@ static void mlxbf_gige_get_regs(struct net_device *netdev, memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ); } -static void mlxbf_gige_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ering) +static void +mlxbf_gige_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct mlxbf_gige *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index d1ae248e125c..4683312861ac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -66,7 +66,7 @@ config MLXSW_SPECTRUM default m help This driver supports Mellanox Technologies - Spectrum/Spectrum-2/Spectrum-3 Ethernet Switch ASICs. + Spectrum/Spectrum-2/Spectrum-3/Spectrum-4 Ethernet Switch ASICs. To compile this driver as a module, choose M here: the module will be called mlxsw_spectrum. 
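Worth pausing on the fs_dr.c hunk above: the new UPLINK destination type is deliberately routed through the existing VPORT case, since the two differ only in which action constructor runs; the fallthrough-plus-ternary keeps a single copy of the allocation bookkeeping and error-unwind path. A minimal, self-contained sketch of the same shape (the enum values and handlers here are hypothetical stand-ins, not the mlx5 definitions):

#include <stdio.h>

enum dest_type { DEST_VPORT, DEST_UPLINK, DEST_COUNTER };

/* Hypothetical constructors standing in for create_vport_action() and
 * create_uplink_action() from the hunk above. */
static void *create_vport(void)  { puts("vport action");  return (void *)1; }
static void *create_uplink(void) { puts("uplink action"); return (void *)1; }

static int add_dest_action(enum dest_type type)
{
	void *act;

	switch (type) {
	case DEST_UPLINK:
	case DEST_VPORT:
		/* One shared case: only the constructor differs. */
		act = type == DEST_VPORT ? create_vport() : create_uplink();
		if (!act)
			return -1;	/* shared error path */
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	return add_dest_action(DEST_UPLINK);
}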
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 392ce3cb27f7..51b260d54237 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -935,6 +935,18 @@ static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8); +enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp { + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE, + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE, +}; + +/* cmd_mbox_sw2hw_dq_sdq_lp + * SDQ local Processing + * 0: local processing by wqe.lp + * 1: local processing (ignoring wqe.lp) + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1); + /* cmd_mbox_sw2hw_dq_sdq_tclass * SDQ: CPU Egress TClass * RDQ: Reserved diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 3fd3812b8f31..866b9357939b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -47,7 +47,7 @@ static struct workqueue_struct *mlxsw_owq; struct mlxsw_core_port { struct devlink_port devlink_port; void *port_driver_priv; - u8 local_port; + u16 local_port; }; void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) @@ -77,7 +77,7 @@ struct mlxsw_core { bool enable_string_tlv; } emad; struct { - u8 *mapping; /* lag_id+port_index to local_port mapping */ + u16 *mapping; /* lag_id+port_index to local_port mapping */ } lag; struct mlxsw_res res; struct mlxsw_hwmon *hwmon; @@ -160,7 +160,7 @@ static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload) devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS); if (!reload) - devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); + devlink_resources_unregister(priv_to_devlink(mlxsw_core)); kfree(mlxsw_core->ports); } @@ -718,7 +718,7 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, } /* called with rcu read lock held */ -static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, +static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port, void *priv) { struct mlxsw_core *mlxsw_core = priv; @@ -1708,6 +1708,124 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg, static const struct mlxsw_listener mlxsw_core_health_listener = MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE); +static int +mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl, + struct devlink_fmsg *fmsg) +{ + u32 val, tile_v; + int err; + + val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val); + if (err) + return err; + tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl); + if (tile_v) { + val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl); + err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); + if (err) + return err; + } + + return 0; +} + +static int +mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl, + struct devlink_fmsg *fmsg) +{ + u32 val, tile_v; + int err; + + val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "var0", val); + if (err) + return err; + val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "var1", val); + if (err) + return err; + val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "var2", val); + if (err) + return err; + val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl); + err = 
devlink_fmsg_u32_pair_put(fmsg, "var3", val); + if (err) + return err; + val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "var4", val); + if (err) + return err; + val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val); + if (err) + return err; + val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "callra", val); + if (err) + return err; + val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl); + err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); + if (err) + return err; + tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl); + if (tile_v) { + val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl); + err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val); + if (err) + return err; + } + val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val); + if (err) + return err; + + return 0; +} + +static int +mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl, + struct devlink_fmsg *fmsg) +{ + u32 val; + int err; + + val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl); + err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); + if (err) + return err; + val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl); + return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); +} + +static int +mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl, + struct devlink_fmsg *fmsg) +{ + u32 val; + int err; + + val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl); + err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val); + if (err) + return err; + val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl); + err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val); + if (err) + return err; + val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl); + err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); + if (err) + return err; + val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl); + err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); + if (err) + return err; + + return 0; +} + static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx, struct netlink_ext_ack *extack) @@ -1741,6 +1859,46 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: val_str = "KVD insertion machine stopped"; break; + case MLXSW_REG_MFDE_EVENT_ID_TEST: + val_str = "Test"; + break; + case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: + val_str = "FW assert"; + break; + case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: + val_str = "Fatal cause"; + break; + default: + val_str = NULL; + } + if (val_str) { + err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str); + if (err) + return err; + } + + err = devlink_fmsg_arr_pair_nest_end(fmsg); + if (err) + return err; + + err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity"); + if (err) + return err; + + val = mlxsw_reg_mfde_severity_get(mfde_pl); + err = devlink_fmsg_u8_pair_put(fmsg, "id", val); + if (err) + return err; + switch (val) { + case MLXSW_REG_MFDE_SEVERITY_FATL: + val_str = "Fatal"; + break; + case MLXSW_REG_MFDE_SEVERITY_NRML: + val_str = "Normal"; + break; + case MLXSW_REG_MFDE_SEVERITY_INTR: + val_str = "Debug"; + break; default: val_str = NULL; } @@ -1749,6 +1907,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor if (err) return err; } + err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) return err; @@ -1800,24 
+1959,18 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor if (err) return err; - if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) { - val = mlxsw_reg_mfde_log_address_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val); - if (err) - return err; - val = mlxsw_reg_mfde_log_id_get(mfde_pl); - err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val); - if (err) - return err; - val = mlxsw_reg_mfde_log_ip_get(mfde_pl); - err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val); - if (err) - return err; - } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) { - val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl); - err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val); - if (err) - return err; + switch (event_id) { + case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO: + return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, + fmsg); + case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP: + return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, + fmsg); + case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT: + return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg); + case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE: + return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, + fmsg); } return 0; @@ -1959,7 +2112,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) { - alloc_size = sizeof(u8) * + alloc_size = sizeof(*mlxsw_core->lag.mapping) * MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) * MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); @@ -2033,7 +2186,7 @@ err_alloc_lag_mapping: mlxsw_ports_fini(mlxsw_core, reload); err_ports_init: if (!reload) - devlink_resources_unregister(devlink, NULL); + devlink_resources_unregister(devlink); err_register_resources: mlxsw_bus->fini(bus_priv); err_bus_init: @@ -2099,7 +2252,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, kfree(mlxsw_core->lag.mapping); mlxsw_ports_fini(mlxsw_core, reload); if (!reload) - devlink_resources_unregister(devlink, NULL); + devlink_resources_unregister(devlink); mlxsw_core->bus->fini(mlxsw_core->bus_priv); if (!reload) devlink_free(devlink); @@ -2108,7 +2261,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, reload_fail_deinit: mlxsw_core_params_unregister(mlxsw_core); - devlink_resources_unregister(devlink, NULL); + devlink_resources_unregister(devlink); devlink_free(devlink); } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); @@ -2130,7 +2283,7 @@ int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, EXPORT_SYMBOL(mlxsw_core_skb_transmit); void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { if (mlxsw_core->driver->ptp_transmitted) mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb, @@ -2208,7 +2361,7 @@ mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core, rxl_item->enabled = enabled; } -static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port, +static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port, void *priv) { struct mlxsw_event_listener_item *event_listener_item = priv; @@ -2641,7 +2794,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, { struct mlxsw_rx_listener_item *rxl_item; const struct mlxsw_rx_listener *rxl; - u8 local_port; + u16 local_port; bool 
found = false; if (rx_info->is_lag) { @@ -2699,7 +2852,7 @@ static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core, } void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index, u8 local_port) + u16 lag_id, u8 port_index, u16 local_port) { int index = mlxsw_core_lag_mapping_index(mlxsw_core, lag_id, port_index); @@ -2708,8 +2861,8 @@ void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_set); -u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index) +u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index) { int index = mlxsw_core_lag_mapping_index(mlxsw_core, lag_id, port_index); @@ -2719,7 +2872,7 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, EXPORT_SYMBOL(mlxsw_core_lag_mapping_get); void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 local_port) + u16 lag_id, u16 local_port) { int i; @@ -2747,7 +2900,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_res_get); -static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, +static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, enum devlink_port_flavour flavour, u32 port_number, bool split, u32 split_port_subnumber, @@ -2778,7 +2931,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, return err; } -static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) +static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; @@ -2788,7 +2941,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); } -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, @@ -2810,7 +2963,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, } EXPORT_SYMBOL(mlxsw_core_port_init); -void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) +void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port) { atomic_dec(&mlxsw_core->active_ports_count); @@ -2845,7 +2998,7 @@ void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core) } EXPORT_SYMBOL(mlxsw_core_cpu_port_fini); -void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv, struct net_device *dev) { struct mlxsw_core_port *mlxsw_core_port = @@ -2857,7 +3010,7 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, } EXPORT_SYMBOL(mlxsw_core_port_eth_set); -void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv) { struct mlxsw_core_port *mlxsw_core_port = @@ -2869,7 +3022,7 @@ void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, } EXPORT_SYMBOL(mlxsw_core_port_ib_set); -void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv) { struct mlxsw_core_port *mlxsw_core_port = @@ -2882,7 +3035,7 @@ void 
mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, EXPORT_SYMBOL(mlxsw_core_port_clear); enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, - u8 local_port) + u16 local_port) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; @@ -2895,7 +3048,7 @@ EXPORT_SYMBOL(mlxsw_core_port_type_get); struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, - u8 local_port) + u16 local_port) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; @@ -2905,7 +3058,7 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); -bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port) +bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port) { const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; int i; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 12023a550007..f30bb8614e69 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -54,7 +54,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload); struct mlxsw_tx_info { - u8 local_port; + u16 local_port; bool is_emad; }; @@ -67,7 +67,7 @@ struct mlxsw_rx_md_info { u16 tx_sys_port; u16 tx_lag_id; }; - u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */ + u16 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */ u8 tx_tc; u8 latency_valid:1, tx_congestion_valid:1, @@ -82,11 +82,11 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port); + struct sk_buff *skb, u16 local_port); struct mlxsw_rx_listener { - void (*func)(struct sk_buff *skb, u8 local_port, void *priv); - u8 local_port; + void (*func)(struct sk_buff *skb, u16 local_port, void *priv); + u16 local_port; u8 mirror_reason; u16 trap_id; }; @@ -209,7 +209,7 @@ struct mlxsw_rx_info { u16 sys_port; u16 lag_id; } u; - u8 lag_port_index; + u16 lag_port_index; u8 mirror_reason; int trap_id; }; @@ -218,36 +218,36 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, struct mlxsw_rx_info *rx_info); void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index, u8 local_port); -u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 port_index); + u16 lag_id, u8 port_index, u16 local_port); +u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 port_index); void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, - u16 lag_id, u8 local_port); + u16 lag_id, u16 local_port); void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len); -void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); +void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port); int 
mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, void *port_driver_priv, const unsigned char *switch_id, unsigned char switch_id_len); void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core); -void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv, struct net_device *dev); -void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv); -void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, +void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port, void *port_driver_priv); enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, - u8 local_port); + u16 local_port); struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, - u8 local_port); -bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port); + u16 local_port); +bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); @@ -316,11 +316,11 @@ struct mlxsw_driver { struct netlink_ext_ack *extack); void (*fini)(struct mlxsw_core *mlxsw_core); int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core); - int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port, + int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port, enum devlink_port_type new_type); - int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, + int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port, unsigned int count, struct netlink_ext_ack *extack); - int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port, + int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port, struct netlink_ext_ack *extack); int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, @@ -394,7 +394,7 @@ struct mlxsw_driver { * is responsible for freeing the passed-in SKB. 
*/ void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port); + struct sk_buff *skb, u16 local_port); u8 txhdr_len; const struct mlxsw_config_profile *profile; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 78d9c0196f2b..77e82e6cf6e8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -113,7 +113,7 @@ static const struct rhashtable_params mlxsw_afa_set_ht_params = { }; struct mlxsw_afa_fwd_entry_ht_key { - u8 local_port; + u16 local_port; }; struct mlxsw_afa_fwd_entry { @@ -555,7 +555,7 @@ int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block) EXPORT_SYMBOL(mlxsw_afa_block_terminate); static struct mlxsw_afa_fwd_entry * -mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) +mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port) { struct mlxsw_afa_fwd_entry *fwd_entry; int err; @@ -598,7 +598,7 @@ static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa, } static struct mlxsw_afa_fwd_entry * -mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port) +mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port) { struct mlxsw_afa_fwd_entry_ht_key ht_key = {0}; struct mlxsw_afa_fwd_entry *fwd_entry; @@ -647,7 +647,7 @@ mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block, } static struct mlxsw_afa_fwd_entry_ref * -mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port) +mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u16 local_port) { struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; struct mlxsw_afa_fwd_entry *fwd_entry; @@ -1352,7 +1352,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward); struct mlxsw_afa_mirror { struct mlxsw_afa_resource resource; int span_id; - u8 local_in_port; + u16 local_in_port; bool ingress; }; @@ -1379,7 +1379,7 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block, } static struct mlxsw_afa_mirror * -mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port, +mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u16 local_in_port, const struct net_device *out_dev, bool ingress) { struct mlxsw_afa_mirror *mirror; @@ -1423,7 +1423,7 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, } int -mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, +mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u16 local_in_port, const struct net_device *out_dev, bool ingress, struct netlink_ext_ack *extack) { @@ -1663,7 +1663,7 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, } int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port, + u16 local_port, bool in_port, struct netlink_ext_ack *extack) { struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; @@ -2038,7 +2038,7 @@ static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate) struct mlxsw_afa_sampler { struct mlxsw_afa_resource resource; int span_id; - u8 local_port; + u16 local_port; bool ingress; }; @@ -2061,7 +2061,7 @@ static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block, } static struct mlxsw_afa_sampler * -mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port, +mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool 
truncate, bool ingress, struct netlink_ext_ack *extack) @@ -2104,7 +2104,7 @@ mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block, return 0; } -int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index b65bf98eb5ab..16cbd6acbb01 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -17,24 +17,24 @@ struct mlxsw_afa_ops { void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index, bool *activity); - int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); + int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u16 local_port); void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); int (*counter_index_get)(void *priv, unsigned int *p_counter_index); void (*counter_index_put)(void *priv, unsigned int counter_index); - int (*mirror_add)(void *priv, u8 local_in_port, + int (*mirror_add)(void *priv, u16 local_in_port, const struct net_device *out_dev, bool ingress, int *p_span_id); - void (*mirror_del)(void *priv, u8 local_in_port, int span_id, + void (*mirror_del)(void *priv, u16 local_in_port, int span_id, bool ingress); int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst, u16 *p_policer_index, struct netlink_ext_ack *extack); void (*policer_del)(void *priv, u16 policer_index); - int (*sampler_add)(void *priv, u8 local_port, + int (*sampler_add)(void *priv, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, int *p_span_id, struct netlink_ext_ack *extack); - void (*sampler_del)(void *priv, u8 local_port, int span_id, + void (*sampler_del)(void *priv, u16 local_port, int span_id, bool ingress); bool dummy_first_set; }; @@ -62,12 +62,12 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, - u8 local_in_port, + u16 local_in_port, const struct net_device *out_dev, bool ingress, struct netlink_ext_ack *extack); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port, + u16 local_port, bool in_port, struct netlink_ext_ack *extack); int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, u16 vid, u8 pcp, u8 et, @@ -98,7 +98,7 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block, u32 fa_index, u64 rate_bytes_ps, u32 burst, u16 *p_policer_index, struct netlink_ext_ack *extack); -int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port, +int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index f1b09c2f9eda..bd1a51a0a540 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -32,8 +32,8 @@ static const struct mlxsw_afk_element_info 
mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index a47a17c04c62..3a037fe47211 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -33,8 +33,8 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_IP_TTL_, MLXSW_AFK_ELEMENT_IP_ECN, MLXSW_AFK_ELEMENT_IP_DSCP, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, MLXSW_AFK_ELEMENT_MAX, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index ab70a873a01a..cfafbeb42586 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -367,6 +367,42 @@ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ } +#define LOCAL_PORT_LSB_SIZE 8 +#define LOCAL_PORT_MSB_SIZE 2 + +#define MLXSW_ITEM32_LP(_type, _cname, _offset1, _shift1, _offset2, _shift2) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, local_port) = { \ + .offset = _offset1, \ + .shift = _shift1, \ + .size = {.bits = LOCAL_PORT_LSB_SIZE,}, \ + .name = #_type "_" #_cname "_local_port", \ +}; \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, lp_msb) = { \ + .offset = _offset2, \ + .shift = _shift2, \ + .size = {.bits = LOCAL_PORT_MSB_SIZE,}, \ + .name = #_type "_" #_cname "_lp_msb", \ +}; \ +static inline u32 __maybe_unused \ +mlxsw_##_type##_##_cname##_local_port_get(const char *buf) \ +{ \ + u32 local_port, lp_msb; \ + \ + local_port = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, \ + local_port), 0); \ + lp_msb = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, lp_msb), \ + 0); \ + return (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port; \ +} \ +static inline void __maybe_unused \ +mlxsw_##_type##_##_cname##_local_port_set(char *buf, u32 val) \ +{ \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, local_port), 0, \ + val & ((1 << LOCAL_PORT_LSB_SIZE) - 1)); \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, lp_msb), 0, \ + val >> LOCAL_PORT_LSB_SIZE); \ +} + #define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ _step, _instepoffset, _norealshift) \ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 5d4dfa5ddbb5..10d13f5f9c7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -38,7 +38,7 @@ struct mlxsw_m { struct mlxsw_m_port { struct net_device *dev; struct mlxsw_m *mlxsw_m; - u8 local_port; + u16 local_port; u8 module; }; @@ -180,7 +180,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { }; static int -mlxsw_m_port_module_info_get(struct 
mlxsw_m *mlxsw_m, u8 local_port, +mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port, u8 *p_module, u8 *p_width) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -214,7 +214,7 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port) } static int -mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) +mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module) { struct mlxsw_m_port *mlxsw_m_port; struct net_device *dev; @@ -277,7 +277,7 @@ err_alloc_etherdev: return err; } -static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port) +static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port) { struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port]; @@ -288,7 +288,7 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port) mlxsw_core_port_fini(mlxsw_m->core, local_port); } -static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, +static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port, u8 *last_module) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index a15c95a10bae..f91dde4df152 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -285,6 +285,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { int tclass; + int lp; int i; int err; @@ -292,9 +293,12 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, q->consumer_counter = 0; tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC : MLXSW_PCI_SDQ_CTL_TC; + lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE : + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE; /* Set CQ of same number of this SDQ. 
*/ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); + mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp); mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { @@ -1678,7 +1682,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, wqe = elem_info->elem; mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */ - mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad); + mlxsw_pci_wqe_lp_set(wqe, 0); mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET); err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, @@ -1973,6 +1977,7 @@ int mlxsw_pci_driver_register(struct pci_driver *pci_driver) { pci_driver->probe = mlxsw_pci_probe; pci_driver->remove = mlxsw_pci_remove; + pci_driver->shutdown = mlxsw_pci_remove; return pci_register_driver(pci_driver); } EXPORT_SYMBOL(mlxsw_pci_driver_register); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 9899c1a2ea8f..cacc2f9fa1d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -9,6 +9,7 @@ #define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 #define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c #define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70 +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM4 0xcf80 #if IS_ENABLED(CONFIG_MLXSW_PCI) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8d420eb8ade2..24cc65018b41 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -69,52 +69,6 @@ MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN); */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); -/* SMID - Switch Multicast ID - * -------------------------- - * The MID record maps from a MID (Multicast ID), which is a unique identifier - * of the multicast group within the stacking domain, into a list of local - * ports into which the packet is replicated. - */ -#define MLXSW_REG_SMID_ID 0x2007 -#define MLXSW_REG_SMID_LEN 0x240 - -MLXSW_REG_DEFINE(smid, MLXSW_REG_SMID_ID, MLXSW_REG_SMID_LEN); - -/* reg_smid_swid - * Switch partition ID. - * Access: Index - */ -MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); - -/* reg_smid_mid - * Multicast identifier - global identifier that represents the multicast group - * across all devices. - * Access: Index - */ -MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); - -/* reg_smid_port - * Local port memebership (1 bit per port). - * Access: RW - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); - -/* reg_smid_port_mask - * Local port mask (1 bit per port). - * Access: W - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); - -static inline void mlxsw_reg_smid_pack(char *payload, u16 mid, - u8 port, bool set) -{ - MLXSW_REG_ZERO(smid, payload); - mlxsw_reg_smid_swid_set(payload, 0); - mlxsw_reg_smid_mid_set(payload, mid); - mlxsw_reg_smid_port_set(payload, port, set); - mlxsw_reg_smid_port_mask_set(payload, port, 1); -} - /* SSPR - Switch System Port Record Register * ----------------------------------------- * Configures the system port to local port mapping. @@ -141,7 +95,7 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1); * * Access: RW */ -MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12); /* reg_sspr_sub_port * Virtual port within the physical port. 
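The MLXSW_ITEM32_LP() macro added to item.h earlier in this diff is what lets registers such as SSPR above widen local_port from u8 to u16 without moving the existing field: the low 8 bits stay in the old 8-bit slot and the 2 extra bits land in a previously reserved position. A standalone model of the (0x00, 16, 0x00, 12) placement that the converted registers all use, i.e. LSB at bits 16..23 and MSB at bits 12..13 of the first register dword (helper names are ours, not the driver's):

#include <assert.h>
#include <stdint.h>

#define LP_LSB_SHIFT	16
#define LP_LSB_MASK	0xff	/* LOCAL_PORT_LSB_SIZE = 8 bits */
#define LP_MSB_SHIFT	12
#define LP_MSB_MASK	0x3	/* LOCAL_PORT_MSB_SIZE = 2 bits */

static void lp_set(uint32_t *dword, uint16_t local_port)
{
	*dword &= ~((uint32_t)LP_LSB_MASK << LP_LSB_SHIFT);
	*dword &= ~((uint32_t)LP_MSB_MASK << LP_MSB_SHIFT);
	*dword |= (uint32_t)(local_port & LP_LSB_MASK) << LP_LSB_SHIFT;
	*dword |= (uint32_t)((local_port >> 8) & LP_MSB_MASK) << LP_MSB_SHIFT;
}

static uint16_t lp_get(uint32_t dword)
{
	uint16_t lsb = (dword >> LP_LSB_SHIFT) & LP_LSB_MASK;
	uint16_t msb = (dword >> LP_MSB_SHIFT) & LP_MSB_MASK;

	/* Mirrors the (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port
	 * recombination in the item.h hunk. */
	return (msb << 8) | lsb;
}

int main(void)
{
	uint32_t dword = 0;

	lp_set(&dword, 300);	/* 300 = 0x12c: lsb 0x2c, msb 0x1 */
	assert(lp_get(dword) == 300);
	return 0;
}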
@@ -161,7 +115,7 @@ MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); */ MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16); -static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(sspr, payload); mlxsw_reg_sspr_m_set(payload, 1); @@ -407,7 +361,7 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, enum mlxsw_reg_sfd_rec_policy policy, const char *mac, u16 fid_vid, enum mlxsw_reg_sfd_rec_action action, - u8 local_port) + u16 local_port) { mlxsw_reg_sfd_rec_pack(payload, rec_index, MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action); @@ -417,15 +371,6 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); } -static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index, - char *mac, u16 *p_fid_vid, - u8 *p_local_port) -{ - mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); - *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); - *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index); -} - /* reg_sfd_uc_lag_sub_port * LAG sub port. * Must be 0 if multichannel VEPA is not enabled. @@ -478,15 +423,6 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index, mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id); } -static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index, - char *mac, u16 *p_vid, - u16 *p_lag_id) -{ - mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); - *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index); - *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index); -} - /* reg_sfd_mc_pgi * * Multicast port group index - index into the port group table. @@ -568,19 +504,43 @@ static inline void mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index, enum mlxsw_reg_sfd_rec_policy policy, const char *mac, u16 fid, - enum mlxsw_reg_sfd_rec_action action, u32 uip, + enum mlxsw_reg_sfd_rec_action action, enum mlxsw_reg_sfd_uc_tunnel_protocol proto) { mlxsw_reg_sfd_rec_pack(payload, rec_index, MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac, action); mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); - mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24); - mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip); mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid); mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto); } +static inline void +mlxsw_reg_sfd_uc_tunnel_pack4(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid, + enum mlxsw_reg_sfd_rec_action action, u32 uip) +{ + mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24); + mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip); + mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, policy, mac, fid, + action, + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4); +} + +static inline void +mlxsw_reg_sfd_uc_tunnel_pack6(char *payload, int rec_index, const char *mac, + u16 fid, enum mlxsw_reg_sfd_rec_action action, + u32 uip_ptr) +{ + mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip_ptr); + /* Only static policy is supported for IPv6 unicast tunnel entry. 
*/ + mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY, + mac, fid, action, + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6); +} + enum mlxsw_reg_tunnel_port { MLXSW_REG_TUNNEL_PORT_NVE, MLXSW_REG_TUNNEL_PORT_VPLS, @@ -692,7 +652,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16, static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index, char *mac, u16 *p_vid, - u8 *p_local_port) + u16 *p_local_port) { mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); @@ -781,7 +741,7 @@ MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spms, 0x00, 16, 0x00, 12); enum mlxsw_reg_spms_state { MLXSW_REG_SPMS_STATE_NO_CHANGE, @@ -800,7 +760,7 @@ enum mlxsw_reg_spms_state { */ MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2); -static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_spms_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(spms, payload); mlxsw_reg_spms_local_port_set(payload, local_port); @@ -833,7 +793,7 @@ MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1); * When tport = 1: Tunnel port. * Access: Index */ -MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvid, 0x00, 16, 0x00, 12); /* reg_spvid_sub_port * Virtual port within the physical port. @@ -868,7 +828,7 @@ MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2); */ MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12); -static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid, +static inline void mlxsw_reg_spvid_pack(char *payload, u16 local_port, u16 pvid, u8 et_vlan) { MLXSW_REG_ZERO(spvid, payload); @@ -911,7 +871,7 @@ MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvm, 0x00, 16, 0x00, 12); /* reg_spvm_sub_port * Virtual port within the physical port. @@ -959,7 +919,7 @@ MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid, MLXSW_REG_SPVM_BASE_LEN, 0, 12, MLXSW_REG_SPVM_REC_LEN, 0, false); -static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spvm_pack(char *payload, u16 local_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged) { @@ -994,7 +954,7 @@ MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN); * * Note: CPU port is not supported (all tag types are allowed). */ -MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spaft, 0x00, 16, 0x00, 12); /* reg_spaft_sub_port * Virtual port within the physical port. @@ -1021,7 +981,7 @@ MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1); */ MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1); -static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port, bool allow_untagged) { MLXSW_REG_ZERO(spaft, payload); @@ -1126,76 +1086,6 @@ mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type, mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID); } -/* SFTR - Switch Flooding Table Register - * ------------------------------------- - * The switch flooding table is used for flooding packet replication. The table - * defines a bit mask of ports for packet replication. 
- */ -#define MLXSW_REG_SFTR_ID 0x2012 -#define MLXSW_REG_SFTR_LEN 0x420 - -MLXSW_REG_DEFINE(sftr, MLXSW_REG_SFTR_ID, MLXSW_REG_SFTR_LEN); - -/* reg_sftr_swid - * Switch partition ID with which to associate the port. - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8); - -/* reg_sftr_flood_table - * Flooding table index to associate with the specific type on the specific - * switch partition. - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6); - -/* reg_sftr_index - * Index. Used as an index into the Flooding Table in case the table is - * configured to use VID / FID or FID Offset. - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16); - -/* reg_sftr_table_type - * See mlxsw_flood_table_type - * Access: RW - */ -MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3); - -/* reg_sftr_range - * Range of entries to update - * Access: Index - */ -MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16); - -/* reg_sftr_port - * Local port membership (1 bit per port). - * Access: RW - */ -MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1); - -/* reg_sftr_cpu_port_mask - * CPU port mask (1 bit per port). - * Access: W - */ -MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1); - -static inline void mlxsw_reg_sftr_pack(char *payload, - unsigned int flood_table, - unsigned int index, - enum mlxsw_flood_table_type table_type, - unsigned int range, u8 port, bool set) -{ - MLXSW_REG_ZERO(sftr, payload); - mlxsw_reg_sftr_swid_set(payload, 0); - mlxsw_reg_sftr_flood_table_set(payload, flood_table); - mlxsw_reg_sftr_index_set(payload, index); - mlxsw_reg_sftr_table_type_set(payload, table_type); - mlxsw_reg_sftr_range_set(payload, range); - mlxsw_reg_sftr_port_set(payload, port, set); - mlxsw_reg_sftr_port_mask_set(payload, port, 1); -} - /* SFDF - Switch Filtering DB Flush * -------------------------------- * The switch filtering DB flush register is used to flush the FDB. @@ -1347,7 +1237,7 @@ MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8); MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false); static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id, - u8 local_port) + u16 local_port) { MLXSW_REG_ZERO(sldr, payload); mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST); @@ -1357,7 +1247,7 @@ static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id, } static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id, - u8 local_port) + u16 local_port) { MLXSW_REG_ZERO(sldr, payload); mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST); @@ -1397,7 +1287,7 @@ MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1); * Reserved when pp = Global Configuration * Access: Index */ -MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, slcr, 0x00, 16, 0x00, 12); enum mlxsw_reg_slcr_type { MLXSW_REG_SLCR_TYPE_CRC, /* default */ @@ -1515,7 +1405,7 @@ MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2); * Not supported for CPU port * Access: Index */ -MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, slcor, 0x00, 16, 0x00, 12); /* reg_slcor_lag_id * LAG Identifier. Index into the LAG descriptor table. 
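The sizing of the port bitmaps explains why SFTR (and SMID before it) had to be retired rather than patched: the removed register carries 0x20-byte port and port-mask arrays, and 0x20 bytes * 8 = 256 bits is exactly the reach of a u8 local_port. The V2 replacements introduced later in this diff grow both arrays to 0x80 bytes (1024 bits) to cover the widened 10-bit ports. A plain C model of such a membership bitmap; the byte/bit indexing below is illustrative only, since the on-wire ordering is defined by MLXSW_ITEM_BIT_ARRAY(), which is not shown in this diff:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PORT_MASK_V1_LEN	0x20	/* 256 ports, u8 local_port era */
#define PORT_MASK_V2_LEN	0x80	/* 1024 ports, u16 local_port */

struct port_mask {
	uint8_t bits[PORT_MASK_V2_LEN];
};

static void port_mask_set(struct port_mask *m, uint16_t local_port, int member)
{
	uint8_t bit = 1 << (local_port % 8);

	if (member)
		m->bits[local_port / 8] |= bit;
	else
		m->bits[local_port / 8] &= ~bit;
}

static int port_mask_test(const struct port_mask *m, uint16_t local_port)
{
	return !!(m->bits[local_port / 8] & (1 << (local_port % 8)));
}

int main(void)
{
	struct port_mask m;

	memset(&m, 0, sizeof(m));
	port_mask_set(&m, 700, 1);	/* beyond the old 256-port limit */
	assert(port_mask_test(&m, 700));
	return 0;
}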
@@ -1531,7 +1421,7 @@ MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10); MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10); static inline void mlxsw_reg_slcor_pack(char *payload, - u8 local_port, u16 lag_id, + u16 local_port, u16 lag_id, enum mlxsw_reg_slcor_col col) { MLXSW_REG_ZERO(slcor, payload); @@ -1541,7 +1431,7 @@ static inline void mlxsw_reg_slcor_pack(char *payload, } static inline void mlxsw_reg_slcor_port_add_pack(char *payload, - u8 local_port, u16 lag_id, + u16 local_port, u16 lag_id, u8 port_index) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, @@ -1550,21 +1440,21 @@ static inline void mlxsw_reg_slcor_port_add_pack(char *payload, } static inline void mlxsw_reg_slcor_port_remove_pack(char *payload, - u8 local_port, u16 lag_id) + u16 local_port, u16 lag_id) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT); } static inline void mlxsw_reg_slcor_col_enable_pack(char *payload, - u8 local_port, u16 lag_id) + u16 local_port, u16 lag_id) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); } static inline void mlxsw_reg_slcor_col_disable_pack(char *payload, - u8 local_port, u16 lag_id) + u16 local_port, u16 lag_id) { mlxsw_reg_slcor_pack(payload, local_port, lag_id, MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED); @@ -1583,7 +1473,7 @@ MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spmlr, 0x00, 16, 0x00, 12); /* reg_spmlr_sub_port * Virtual port within the physical port. @@ -1611,7 +1501,7 @@ enum mlxsw_reg_spmlr_learn_mode { */ MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2); -static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port, enum mlxsw_reg_spmlr_learn_mode mode) { MLXSW_REG_ZERO(spmlr, payload); @@ -1642,7 +1532,7 @@ MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8); * * Note: Reserved for 802.1Q FIDs. */ -MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12); enum mlxsw_reg_svfa_mt { MLXSW_REG_SVFA_MT_VID_TO_FID, @@ -1696,7 +1586,7 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8); */ MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24); -static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port, enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid, u16 vid) { @@ -1733,7 +1623,7 @@ MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1); * When tport = 1: tunnel port. * Access: Index */ -MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvtr, 0x00, 16, 0x00, 12); /* reg_spvtr_ippe * Ingress Port Prio Mode Update Enable. @@ -1803,7 +1693,7 @@ enum mlxsw_reg_spvtr_epvid_mode { MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4); static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport, - u8 local_port, + u16 local_port, enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode) { MLXSW_REG_ZERO(spvtr, payload); @@ -1828,7 +1718,7 @@ MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN); * * Note: CPU port is not supported (uses VLAN mode only). */ -MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, svpe, 0x00, 16, 0x00, 12); /* reg_svpe_vp_en * Virtual port enable. 
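The SLCOR helpers below this hunk manage per-port LAG collector state; their companion on the software side is the lag_id+port_index -> local_port table whose core.c hunks appear earlier in this diff, now holding u16 entries and allocated as sizeof(*mapping) * MAX_LAG * MAX_LAG_MEMBERS. A sketch of that layout under a row-major indexing assumption (the driver's actual mlxsw_core_lag_mapping_index() body is not shown in this diff, and the sizes below are arbitrary example values):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_LAG		64
#define MAX_LAG_MEMBERS	32

struct lag_map {
	uint16_t mapping[MAX_LAG * MAX_LAG_MEMBERS];	/* u16 per slot */
};

/* Assumed row-major flattening of the (lag_id, port_index) pair. */
static int lag_map_index(uint16_t lag_id, uint8_t port_index)
{
	return lag_id * MAX_LAG_MEMBERS + port_index;
}

static void lag_map_set(struct lag_map *m, uint16_t lag_id,
			uint8_t port_index, uint16_t local_port)
{
	m->mapping[lag_map_index(lag_id, port_index)] = local_port;
}

static uint16_t lag_map_get(const struct lag_map *m, uint16_t lag_id,
			    uint8_t port_index)
{
	return m->mapping[lag_map_index(lag_id, port_index)];
}

int main(void)
{
	struct lag_map *m = calloc(1, sizeof(*m));

	lag_map_set(m, 3, 1, 300);	/* u16 entries fit ports > 255 */
	assert(lag_map_get(m, 3, 1) == 300);
	free(m);
	return 0;
}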
@@ -1838,7 +1728,7 @@ MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1); -static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port, bool enable) { MLXSW_REG_ZERO(svpe, payload); @@ -1948,7 +1838,7 @@ MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN); * * Note: CPU port is not supported. */ -MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvmlr, 0x00, 16, 0x00, 12); /* reg_spvmlr_num_rec * Number of records to update. @@ -1971,7 +1861,7 @@ MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN, MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); -static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spvmlr_pack(char *payload, u16 local_port, u16 vid_begin, u16 vid_end, bool learn_enable) { @@ -2009,7 +1899,7 @@ MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN); * through Rx port i and a Tx port j then port i and port j must have the * same configuration. */ -MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spvc, 0x00, 16, 0x00, 12); /* reg_spvc_inner_et2 * Vlan Tag1 EtherType2 enable. @@ -2074,7 +1964,7 @@ MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1); */ MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1); -static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1, +static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1, bool et0) { MLXSW_REG_ZERO(spvc, payload); @@ -2104,7 +1994,7 @@ MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN); * Not supported to CPU port. * Access: Index */ -MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, spevet, 0x00, 16, 0x00, 12); /* reg_spevet_et_vlan * Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet: @@ -2115,7 +2005,7 @@ MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2); -static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port, u8 et_vlan) { MLXSW_REG_ZERO(spevet, payload); @@ -2123,6 +2013,122 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port, mlxsw_reg_spevet_et_vlan_set(payload, et_vlan); } +/* SFTR-V2 - Switch Flooding Table Version 2 Register + * -------------------------------------------------- + * The switch flooding table is used for flooding packet replication. The table + * defines a bit mask of ports for packet replication. + */ +#define MLXSW_REG_SFTR2_ID 0x202F +#define MLXSW_REG_SFTR2_LEN 0x120 + +MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN); + +/* reg_sftr2_swid + * Switch partition ID with which to associate the port. + * Access: Index + */ +MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8); + +/* reg_sftr2_flood_table + * Flooding table index to associate with the specific type on the specific + * switch partition. + * Access: Index + */ +MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6); + +/* reg_sftr2_index + * Index. Used as an index into the Flooding Table in case the table is + * configured to use VID / FID or FID Offset. 
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16);
+
+/* reg_sftr2_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3);
+
+/* reg_sftr2_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16);
+
+/* reg_sftr2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1);
+
+/* reg_sftr2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_sftr2_pack(char *payload,
+					unsigned int flood_table,
+					unsigned int index,
+					enum mlxsw_flood_table_type table_type,
+					unsigned int range, u16 port, bool set)
+{
+	MLXSW_REG_ZERO(sftr2, payload);
+	mlxsw_reg_sftr2_swid_set(payload, 0);
+	mlxsw_reg_sftr2_flood_table_set(payload, flood_table);
+	mlxsw_reg_sftr2_index_set(payload, index);
+	mlxsw_reg_sftr2_table_type_set(payload, table_type);
+	mlxsw_reg_sftr2_range_set(payload, range);
+	mlxsw_reg_sftr2_port_set(payload, port, set);
+	mlxsw_reg_sftr2_port_mask_set(payload, port, 1);
+}
+
+/* SMID-V2 - Switch Multicast ID Version 2 Register
+ * ------------------------------------------------
+ * The MID record maps from a MID (Multicast ID), which is a unique identifier
+ * of the multicast group within the stacking domain, into a list of local
+ * ports into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID2_ID 0x2034
+#define MLXSW_REG_SMID2_LEN 0x120
+
+MLXSW_REG_DEFINE(smid2, MLXSW_REG_SMID2_ID, MLXSW_REG_SMID2_LEN);
+
+/* reg_smid2_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
+
+/* reg_smid2_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+
+/* reg_smid2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
+
+/* reg_smid2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
+					bool set)
+{
+	MLXSW_REG_ZERO(smid2, payload);
+	mlxsw_reg_smid2_swid_set(payload, 0);
+	mlxsw_reg_smid2_mid_set(payload, mid);
+	mlxsw_reg_smid2_port_set(payload, port, set);
+	mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+}
+
 /* CWTP - Congestion WRED ECN TClass Profile
  * ----------------------------------------
  * Configures the profiles for queues of egress port and traffic class
@@ -2139,7 +2145,7 @@ MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN);
  * Not supported for CPU port
  * Access: Index
  */
-MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtp, 0x00, 16, 0x00, 12);
 
 /* reg_cwtp_traffic_class
  * Traffic Class to configure
@@ -2173,7 +2179,7 @@ MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN,
 #define MLXSW_REG_CWTP_MAX_PROFILE 2
 #define MLXSW_REG_CWTP_DEFAULT_PROFILE 1
 
-static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtp_pack(char *payload, u16 local_port,
 				       u8 traffic_class)
 {
 	int i;
@@ -2217,7 +2223,7 @@ MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN);
  * Not supported for CPU port
  * Access: Index
  */
-MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtpm, 0x00, 16, 0x00, 12);
 
 /* reg_cwtpm_traffic_class
  * Traffic Class to configure
@@ -2291,7 +2297,7 @@ MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2);
 
 #define MLXSW_REG_CWTPM_RESET_PROFILE 0
 
-static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtpm_pack(char *payload, u16 local_port,
 					u8 traffic_class, u8 profile,
 					bool wred, bool ecn)
 {
@@ -2363,7 +2369,7 @@ MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3);
  * Local port. Not including CPU port.
  * Access: Index
  */
-MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppbt, 0x00, 16, 0x00, 12);
 
 /* reg_ppbt_g
  * group - When set, the binding is of an ACL group. When cleared,
@@ -2382,7 +2388,7 @@ MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16);
 
 static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
 				       enum mlxsw_reg_pxbt_op op,
-				       u8 local_port, u16 acl_info)
+				       u16 local_port, u16 acl_info)
 {
 	MLXSW_REG_ZERO(ppbt, payload);
 	mlxsw_reg_ppbt_e_set(payload, e);
@@ -3513,7 +3519,7 @@ MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
  *
  * Note: CPU port is supported.
  */
-MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpts, 0x00, 16, 0x00, 12);
 
 enum mlxsw_reg_qpts_trust_state {
 	MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
@@ -3526,7 +3532,7 @@ enum mlxsw_reg_qpts_trust_state {
  */
 MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
 
-static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qpts_pack(char *payload, u16 local_port,
 				       enum mlxsw_reg_qpts_trust_state ts)
 {
 	MLXSW_REG_ZERO(qpts, payload);
@@ -3717,7 +3723,7 @@ MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN);
  *
  * Note: CPU port is not supported.
  */
-MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qtct, 0x00, 16, 0x00, 12);
 
 /* reg_qtct_sub_port
  * Virtual port within the physical port.
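The new mlxsw_reg_smid2_pack() above updates a single port's membership bit in a MID entry; the port_mask bit it sets alongside tells the device which bit of the port array is valid, so other ports' membership is left untouched. A hedged sketch of a caller under those assumptions, heap-allocating the payload since SMID2 is 0x120 bytes; example_smid2_port_set() is a hypothetical wrapper around the helpers defined in this patch, while mlxsw_reg_write() is the existing core interface:

static int example_smid2_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
				  u16 local_port, bool member)
{
	char *smid2_pl;
	int err;

	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
	if (!smid2_pl)
		return -ENOMEM;

	/* Set (or clear) only this port's bit; the pack helper also sets
	 * the matching port_mask bit so the device updates just that bit. */
	mlxsw_reg_smid2_pack(smid2_pl, mid_idx, local_port, member);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
	kfree(smid2_pl);
	return err;
}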
@@ -3742,7 +3748,7 @@ MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4); */ MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4); -static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qtct_pack(char *payload, u16 local_port, u8 switch_prio, u8 tclass) { MLXSW_REG_ZERO(qtct, payload); @@ -3766,7 +3772,7 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN); * * Note: CPU port is supported. */ -MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qeec, 0x00, 16, 0x00, 12); enum mlxsw_reg_qeec_hr { MLXSW_REG_QEEC_HR_PORT, @@ -3908,8 +3914,9 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6); #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11 +#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4 11 -static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qeec_pack(char *payload, u16 local_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index) { @@ -3920,7 +3927,7 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, mlxsw_reg_qeec_next_element_index_set(payload, next_index); } -static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u16 local_port, bool ptps) { MLXSW_REG_ZERO(qeec, payload); @@ -3944,7 +3951,7 @@ MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN); * * Note: CPU port is supported. No support for router port. */ -MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qrwe, 0x00, 16, 0x00, 12); /* reg_qrwe_dscp * Whether to enable DSCP rewrite (default is 0, don't rewrite). @@ -3958,7 +3965,7 @@ MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1); */ MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1); -static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qrwe_pack(char *payload, u16 local_port, bool rewrite_pcp, bool rewrite_dscp) { MLXSW_REG_ZERO(qrwe, payload); @@ -3985,7 +3992,7 @@ MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN); * Local Port. Supported for data packets from CPU port. * Access: Index */ -MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qpdsm, 0x00, 16, 0x00, 12); /* reg_qpdsm_prio_entry_color0_e * Enable update of the entry for color 0 and a given port. @@ -4038,7 +4045,7 @@ MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp, MLXSW_REG_QPDSM_BASE_LEN, 8, 6, MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); -static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_qpdsm_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(qpdsm, payload); mlxsw_reg_qpdsm_local_port_set(payload, local_port); @@ -4071,7 +4078,7 @@ MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN); * Local Port. Supported for data packets from CPU port. * Access: Index */ -MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qpdp, 0x00, 16, 0x00, 12); /* reg_qpdp_switch_prio * Default port Switch Priority (default 0) @@ -4079,7 +4086,7 @@ MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4); -static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_qpdp_pack(char *payload, u16 local_port, u8 switch_prio) { MLXSW_REG_ZERO(qpdp, payload); @@ -4106,7 +4113,7 @@ MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN); * Local Port. 
Supported for data packets from CPU port. * Access: Index */ -MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qpdpm, 0x00, 16, 0x00, 12); /* reg_qpdpm_dscp_e * Enable update of the specific entry. When cleared, the switch_prio and color @@ -4125,7 +4132,7 @@ MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio, MLXSW_REG_QPDPM_BASE_LEN, 0, 4, MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false); -static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_qpdpm_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(qpdpm, payload); mlxsw_reg_qpdpm_local_port_set(payload, local_port); @@ -4157,7 +4164,7 @@ MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN); * No support for CPU port. * Access: Index */ -MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, qtctm, 0x00, 16, 0x00, 12); /* reg_qtctm_mc * Multicast Mode @@ -4167,7 +4174,7 @@ MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8); MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1); static inline void -mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc) +mlxsw_reg_qtctm_pack(char *payload, u16 local_port, bool mc) { MLXSW_REG_ZERO(qtctm, payload); mlxsw_reg_qtctm_local_port_set(payload, local_port); @@ -4300,7 +4307,7 @@ MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pmlp, 0x00, 16, 0x00, 12); /* reg_pmlp_width * 0 - Unmap local port. @@ -4331,7 +4338,7 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false); */ MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false); -static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pmlp_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pmlp, payload); mlxsw_reg_pmlp_local_port_set(payload, local_port); @@ -4350,7 +4357,7 @@ MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pmtu, 0x00, 16, 0x00, 12); /* reg_pmtu_max_mtu * Maximum MTU. @@ -4378,7 +4385,7 @@ MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16); */ MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16); -static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pmtu_pack(char *payload, u16 local_port, u16 new_mtu) { MLXSW_REG_ZERO(pmtu, payload); @@ -4412,7 +4419,7 @@ MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1); * Local port number. 
* Access: Index */ -MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, ptys, 0x00, 16, 0x00, 12); #define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0) #define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2) @@ -4572,7 +4579,7 @@ enum mlxsw_reg_ptys_connector_type { */ MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4); -static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ptys_eth_pack(char *payload, u16 local_port, u32 proto_admin, bool autoneg) { MLXSW_REG_ZERO(ptys, payload); @@ -4582,7 +4589,7 @@ static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg); } -static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u16 local_port, u32 proto_admin, bool autoneg) { MLXSW_REG_ZERO(ptys, payload); @@ -4624,7 +4631,7 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload, mlxsw_reg_ptys_ext_eth_proto_oper_get(payload); } -static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port, u16 proto_admin, u16 link_width) { MLXSW_REG_ZERO(ptys, payload); @@ -4672,7 +4679,7 @@ MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1); * port number, if single_base_mac = 0 then local_port is reserved * Access: RW */ -MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, ppad, 0x00, 16, 0x00, 24); /* reg_ppad_mac * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved. @@ -4682,7 +4689,7 @@ MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8); MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6); static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac, - u8 local_port) + u16 local_port) { MLXSW_REG_ZERO(ppad, payload); mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac); @@ -4711,7 +4718,7 @@ MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, paos, 0x00, 16, 0x00, 12); /* reg_paos_admin_status * Port administrative state (the desired state of the port): @@ -4756,7 +4763,7 @@ MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1); */ MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2); -static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_paos_pack(char *payload, u16 local_port, enum mlxsw_port_admin_status status) { MLXSW_REG_ZERO(paos, payload); @@ -4782,7 +4789,7 @@ MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pfcc, 0x00, 16, 0x00, 12); /* reg_pfcc_pnat * Port number access type. Determines the way local_port is interpreted: @@ -4899,7 +4906,7 @@ static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en) mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en); } -static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pfcc_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pfcc, payload); mlxsw_reg_pfcc_local_port_set(payload, local_port); @@ -4928,11 +4935,9 @@ MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8); /* reg_ppcnt_local_port * Local port number. - * 255 indicates all ports on the device, and is only allowed - * for Set() operation. 
* Access: Index */ -MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, ppcnt, 0x00, 16, 0x00, 12); /* reg_ppcnt_pnat * Port number access type: @@ -4981,6 +4986,14 @@ MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6); */ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1); +/* reg_ppcnt_lp_gl + * Local port global variable. + * 0: local_port 255 = all ports of the device. + * 1: local_port indicates local port number for all ports. + * Access: OP + */ +MLXSW_ITEM32(reg, ppcnt, lp_gl, 0x04, 30, 1); + /* reg_ppcnt_prio_tc * Priority for counter set that support per priority, valid values: 0-7. * Traffic class for counter set that support per traffic class, @@ -5404,7 +5417,7 @@ MLXSW_ITEM64(reg, ppcnt, wred_discard, MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); -static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) { @@ -5414,6 +5427,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, mlxsw_reg_ppcnt_pnat_set(payload, 0); mlxsw_reg_ppcnt_grp_set(payload, grp); mlxsw_reg_ppcnt_clr_set(payload, 0); + mlxsw_reg_ppcnt_lp_gl_set(payload, 1); mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc); } @@ -5430,7 +5444,7 @@ MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, plib, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12); /* reg_plib_ib_port * InfiniBand port remapping for local_port. @@ -5468,7 +5482,7 @@ MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pptb, 0x00, 16, 0x00, 12); /* reg_pptb_um * Enables the update of the untagged_buf field. @@ -5515,7 +5529,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4); #define MLXSW_REG_PPTB_ALL_PRIO 0xFF -static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pptb_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pptb, payload); mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM); @@ -5545,7 +5559,7 @@ MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pbmc, 0x00, 16, 0x00, 12); /* reg_pbmc_xoff_timer_value * When device generates a pause frame, it uses this value as the pause @@ -5612,7 +5626,7 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16, MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16, 0x08, 0x04, false); -static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pbmc_pack(char *payload, u16 local_port, u16 xoff_timer_value, u16 xoff_refresh) { MLXSW_REG_ZERO(pbmc, payload); @@ -5661,7 +5675,7 @@ MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pspa, 0x00, 16, 0x00, 0); /* reg_pspa_sub_port * Virtual port within the local port. 
Set to 0 when virtual ports are @@ -5670,7 +5684,7 @@ MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8); -static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) +static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u16 local_port) { MLXSW_REG_ZERO(pspa, payload); mlxsw_reg_pspa_swid_set(payload, swid); @@ -5772,7 +5786,7 @@ MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pplr, 0x00, 16, 0x00, 12); /* Phy local loopback. When set the port's egress traffic is looped back * to the receiver and the port transmitter is disabled. @@ -5785,7 +5799,7 @@ MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8); -static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pplr_pack(char *payload, u16 local_port, bool phy_local) { MLXSW_REG_ZERO(pplr, payload); @@ -5846,7 +5860,7 @@ MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4); * the module. * Access: RO */ -MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false); +MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 10, 0x02, 0x00, false); static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module, u8 ports_width, u8 num_ports) @@ -5915,7 +5929,7 @@ MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pddr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pddr, 0x00, 16, 0x00, 12); enum mlxsw_reg_pddr_page_select { MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1, @@ -5944,7 +5958,7 @@ MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16); */ MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16); -static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_pddr_pack(char *payload, u16 local_port, u8 page_select) { MLXSW_REG_ZERO(pddr, payload); @@ -6014,7 +6028,7 @@ MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pllp, 0x00, 16, 0x00, 12); /* reg_pllp_label_port * Front panel label of the port. @@ -6034,7 +6048,7 @@ MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4); */ MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4); -static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_pllp_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(pllp, payload); mlxsw_reg_pllp_local_port_set(payload, local_port); @@ -10245,7 +10259,7 @@ MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN); * The local port to mirror the packets from. * Access: Index */ -MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mpar, 0x00, 16, 0x00, 4); enum mlxsw_reg_mpar_i_e { MLXSW_REG_MPAR_TYPE_EGRESS, @@ -10282,7 +10296,7 @@ MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4); */ MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32); -static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_mpar_pack(char *payload, u16 local_port, enum mlxsw_reg_mpar_i_e i_e, bool enable, u8 pa_id, u32 probability_rate) @@ -10386,7 +10400,7 @@ MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN); * Local port number. 
* Access: RW */ -MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mlcr, 0x00, 16, 0x00, 24); #define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF @@ -10405,7 +10419,7 @@ MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16); */ MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16); -static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_mlcr_pack(char *payload, u16 local_port, bool active) { MLXSW_REG_ZERO(mlcr, payload); @@ -10778,7 +10792,7 @@ MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN); * Not supported for CPU port * Access: Index */ -MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mpsc, 0x00, 16, 0x00, 12); /* reg_mpsc_e * Enable sampling on port local_port @@ -10795,7 +10809,7 @@ MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1); */ MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32); -static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, +static inline void mlxsw_reg_mpsc_pack(char *payload, u16 local_port, bool e, u32 rate) { MLXSW_REG_ZERO(mpsc, payload); @@ -11003,7 +11017,7 @@ MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, momte, 0x00, 16, 0x00, 12); enum mlxsw_reg_momte_type { MLXSW_REG_MOMTE_TYPE_WRED = 0x20, @@ -11030,7 +11044,7 @@ MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8); */ MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1); -static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_momte_pack(char *payload, u16 local_port, enum mlxsw_reg_momte_type type) { MLXSW_REG_ZERO(momte, payload); @@ -11098,7 +11112,7 @@ MLXSW_REG_DEFINE(mtpptr, MLXSW_REG_MTPPTR_ID, MLXSW_REG_MTPPTR_LEN); * Not supported for CPU port. * Access: Index */ -MLXSW_ITEM32(reg, mtpptr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, mtpptr, 0x00, 16, 0x00, 12); enum mlxsw_reg_mtpptr_dir { MLXSW_REG_MTPPTR_DIR_INGRESS, @@ -11305,7 +11319,7 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices, * ----------------------------------- */ #define MLXSW_REG_MFDE_ID 0x9200 -#define MLXSW_REG_MFDE_LEN 0x18 +#define MLXSW_REG_MFDE_LEN 0x30 MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN); @@ -11315,10 +11329,32 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN); */ MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8); +enum mlxsw_reg_mfde_severity { + /* Unrecoverable switch behavior */ + MLXSW_REG_MFDE_SEVERITY_FATL = 2, + /* Unexpected state with possible systemic failure */ + MLXSW_REG_MFDE_SEVERITY_NRML = 3, + /* Unexpected state without systemic failure */ + MLXSW_REG_MFDE_SEVERITY_INTR = 5, +}; + +/* reg_mfde_severity + * The severity of the event. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, severity, 0x00, 16, 8); + enum mlxsw_reg_mfde_event_id { + /* CRspace timeout */ MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1, /* KVD insertion machine stopped */ MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP, + /* Triggered by MFGD.trigger_test */ + MLXSW_REG_MFDE_EVENT_ID_TEST, + /* Triggered when firmware hits an assert */ + MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT, + /* Fatal error interrupt from hardware */ + MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE, }; /* reg_mfde_event_id @@ -11359,32 +11395,110 @@ MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2); */ MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16); -/* reg_mfde_log_address +/* reg_mfde_crspace_to_log_address * crspace address accessed, which resulted in timeout. 
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO * Access: RO */ -MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32); +MLXSW_ITEM32(reg, mfde, crspace_to_log_address, 0x10, 0, 32); + +/* reg_mfde_crspace_to_oe + * 0 - New event + * 1 - Old event, occurred before MFGD activation. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, crspace_to_oe, 0x14, 24, 1); -/* reg_mfde_log_id +/* reg_mfde_crspace_to_log_id * Which irisc triggered the timeout. - * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO * Access: RO */ -MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4); +MLXSW_ITEM32(reg, mfde, crspace_to_log_id, 0x14, 0, 4); -/* reg_mfde_log_ip +/* reg_mfde_crspace_to_log_ip * IP (instruction pointer) that triggered the timeout. - * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO * Access: RO */ -MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64); +MLXSW_ITEM64(reg, mfde, crspace_to_log_ip, 0x18, 0, 64); + +/* reg_mfde_kvd_im_stop_oe + * 0 - New event + * 1 - Old event, occurred before MFGD activation. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, kvd_im_stop_oe, 0x10, 24, 1); -/* reg_mfde_pipes_mask +/* reg_mfde_kvd_im_stop_pipes_mask * Bit per kvh pipe. * Access: RO */ -MLXSW_ITEM32(reg, mfde, pipes_mask, 0x10, 0, 16); +MLXSW_ITEM32(reg, mfde, kvd_im_stop_pipes_mask, 0x10, 0, 16); + +/* reg_mfde_fw_assert_var0-4 + * Variables passed to assert. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fw_assert_var0, 0x10, 0, 32); +MLXSW_ITEM32(reg, mfde, fw_assert_var1, 0x14, 0, 32); +MLXSW_ITEM32(reg, mfde, fw_assert_var2, 0x18, 0, 32); +MLXSW_ITEM32(reg, mfde, fw_assert_var3, 0x1C, 0, 32); +MLXSW_ITEM32(reg, mfde, fw_assert_var4, 0x20, 0, 32); + +/* reg_mfde_fw_assert_existptr + * The instruction pointer when assert was triggered. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fw_assert_existptr, 0x24, 0, 32); + +/* reg_mfde_fw_assert_callra + * The return address after triggering assert. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fw_assert_callra, 0x28, 0, 32); + +/* reg_mfde_fw_assert_oe + * 0 - New event + * 1 - Old event, occurred before MFGD activation. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fw_assert_oe, 0x2C, 24, 1); + +/* reg_mfde_fw_assert_tile_v + * 0: The assert was from main + * 1: The assert was from a tile + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fw_assert_tile_v, 0x2C, 23, 1); + +/* reg_mfde_fw_assert_tile_index + * When tile_v=1, the tile_index that caused the assert. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fw_assert_tile_index, 0x2C, 16, 6); + +/* reg_mfde_fw_assert_ext_synd + * A generated one-to-one identifier which is specific per-assert. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fw_assert_ext_synd, 0x2C, 0, 16); + +/* reg_mfde_fatal_cause_id + * HW interrupt cause id. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fatal_cause_id, 0x10, 0, 18); + +/* reg_mfde_fatal_cause_tile_v + * 0: The assert was from main + * 1: The assert was from a tile + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fatal_cause_tile_v, 0x14, 23, 1); + +/* reg_mfde_fatal_cause_tile_index + * When tile_v=1, the tile_index that caused the assert. + * Access: RO + */ +MLXSW_ITEM32(reg, mfde, fatal_cause_tile_index, 0x14, 16, 6); /* TNGCR - Tunneling NVE General Configuration Register * ---------------------------------------------------- @@ -11692,7 +11806,7 @@ MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN); * Local port number (receive port). CPU port is supported. 
* Access: Index */ -MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, tnqdr, 0x00, 16, 0x00, 12); /* reg_tnqdr_dscp * For encapsulation, the default DSCP. @@ -11700,7 +11814,7 @@ MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6); -static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port) +static inline void mlxsw_reg_tnqdr_pack(char *payload, u16 local_port) { MLXSW_REG_ZERO(tnqdr, payload); mlxsw_reg_tnqdr_local_port_set(payload, local_port); @@ -12028,7 +12142,7 @@ MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN); * For Egress: excludes IP Router * Access: Index */ -MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sbcm, 0x00, 16, 0x00, 4); /* reg_sbcm_pg_buff * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress) @@ -12082,7 +12196,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); */ MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); -static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, +static inline void mlxsw_reg_sbcm_pack(char *payload, u16 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir, u32 min_buff, u32 max_buff, bool infi_max, u8 pool) @@ -12114,7 +12228,7 @@ MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN); * For Egress: excludes IP Router * Access: Index */ -MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sbpm, 0x00, 16, 0x00, 12); /* reg_sbpm_pool * The pool associated to quota counting on the local_port. @@ -12168,7 +12282,7 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24); */ MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); -static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, +static inline void mlxsw_reg_sbpm_pack(char *payload, u16 local_port, u8 pool, enum mlxsw_reg_sbxx_dir dir, bool clr, u32 min_buff, u32 max_buff) { @@ -12266,6 +12380,16 @@ MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN); */ MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1); +#define MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE 256 + +/* reg_sbsr_port_page + * Determines the range of the ports specified in the 'ingress_port_mask' + * and 'egress_port_mask' bit masks. + * {ingress,egress}_port_mask[x] is (256 * port_page) + x + * Access: Index + */ +MLXSW_ITEM32(reg, sbsr, port_page, 0x04, 0, 4); + /* reg_sbsr_ingress_port_mask * Bit vector for all ingress network ports. 
* Indicates which of the ports (for which the relevant bit is set) @@ -12353,7 +12477,7 @@ MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN); * Not supported for CPU port and router port * Access: Index */ -MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, sbib, 0x00, 16, 0x00, 12); /* reg_sbib_buff_size * Units represented in cells @@ -12363,7 +12487,7 @@ MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24); -static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, +static inline void mlxsw_reg_sbib_pack(char *payload, u16 local_port, u32 buff_size) { MLXSW_REG_ZERO(sbib, payload); @@ -12374,7 +12498,6 @@ static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(sgcr), MLXSW_REG(spad), - MLXSW_REG(smid), MLXSW_REG(sspr), MLXSW_REG(sfdat), MLXSW_REG(sfd), @@ -12384,7 +12507,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvm), MLXSW_REG(spaft), MLXSW_REG(sfgc), - MLXSW_REG(sftr), MLXSW_REG(sfdf), MLXSW_REG(sldr), MLXSW_REG(slcr), @@ -12397,6 +12519,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvmlr), MLXSW_REG(spvc), MLXSW_REG(spevet), + MLXSW_REG(sftr2), + MLXSW_REG(smid2), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), MLXSW_REG(pgcr), @@ -12556,7 +12680,7 @@ MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8); * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8); +MLXSW_ITEM32_LP(reg, pude, 0x00, 16, 0x00, 12); /* reg_pude_admin_status * Port administrative state (the desired state). diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5925db386b1b..aa411dec62f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -46,8 +46,8 @@ #include "spectrum_trap.h" #define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 3326 +#define MLXSW_SP1_FWREV_MINOR 2010 +#define MLXSW_SP1_FWREV_SUBMINOR 1006 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -63,8 +63,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" #define MLXSW_SP2_FWREV_MAJOR 29 -#define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 3326 +#define MLXSW_SP2_FWREV_MINOR 2010 +#define MLXSW_SP2_FWREV_SUBMINOR 1006 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@ -78,8 +78,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { "." 
__stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" #define MLXSW_SP3_FWREV_MAJOR 30 -#define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 3326 +#define MLXSW_SP3_FWREV_MINOR 2010 +#define MLXSW_SP3_FWREV_SUBMINOR 1006 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -95,6 +95,7 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; +static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4"; static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 @@ -303,7 +304,7 @@ int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, - unsigned char *addr) + const unsigned char *addr) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char ppad_pl[MLXSW_REG_PPAD_LEN]; @@ -352,7 +353,7 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) } static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 swid) + u16 local_port, u8 swid) { char pspa_pl[MLXSW_REG_PSPA_LEN]; @@ -483,7 +484,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) } static int -mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, +mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, struct mlxsw_sp_port_mapping *port_mapping) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -535,7 +536,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, } static int -mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, +mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, const struct mlxsw_sp_port_mapping *port_mapping) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -560,7 +561,7 @@ err_pmlp_write: return err; } -static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port, u8 module) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -1474,7 +1475,7 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 *port_number, + u16 local_port, u8 *port_number, u8 *split_port_subnumber, u8 *slot_index) { @@ -1490,7 +1491,7 @@ static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp, return 0; } -static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, bool split, struct mlxsw_sp_port_mapping *port_mapping) { @@ -1781,7 +1782,7 @@ err_port_swid_set: return err; } -static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) +static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; u8 module = mlxsw_sp_port->mapping.module; @@ -1848,12 +1849,12 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp_port); } -static bool mlxsw_sp_local_port_valid(u8 local_port) +static bool mlxsw_sp_local_port_valid(u16 local_port) { return local_port != MLXSW_PORT_CPU_PORT; } -static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) +static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) { 
if (!mlxsw_sp_local_port_valid(local_port)) return false; @@ -1971,7 +1972,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, split_port_mapping = *port_mapping; split_port_mapping.width /= count; for (i = 0; i < count; i++) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (!mlxsw_sp_local_port_valid(s_local_port)) continue; @@ -1987,7 +1988,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, err_port_create: for (i--; i >= 0; i--) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) mlxsw_sp_port_remove(mlxsw_sp, s_local_port); @@ -2004,7 +2005,7 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, /* Go over original unsplit ports in the gap and recreate them. */ for (i = 0; i < count; i++) { - u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); port_mapping = mlxsw_sp->port_mapping[local_port]; if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) @@ -2015,14 +2016,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_port * -mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) +mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port) { if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) return mlxsw_sp->ports[local_port]; return NULL; } -static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, +static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, unsigned int count, struct netlink_ext_ack *extack) { @@ -2065,7 +2066,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, port_mapping = mlxsw_sp_port->mapping; for (i = 0; i < count; i++) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) mlxsw_sp_port_remove(mlxsw_sp, s_local_port); @@ -2085,7 +2086,7 @@ err_port_split_create: return err; } -static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, +static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -2121,7 +2122,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, } for (i = 0; i < count; i++) { - u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) mlxsw_sp_port_remove(mlxsw_sp, s_local_port); @@ -2148,12 +2149,12 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, struct mlxsw_sp_port *mlxsw_sp_port; enum mlxsw_reg_pude_oper_status status; unsigned int max_ports; - u8 local_port; + u16 local_port; max_ports = mlxsw_core_max_ports(mlxsw_sp->core); local_port = mlxsw_reg_pude_local_port_get(pude_pl); - if (WARN_ON_ONCE(local_port >= max_ports)) + if (WARN_ON_ONCE(!local_port || local_port >= max_ports)) return; mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) @@ -2174,7 +2175,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp, char *mtpptr_pl, bool ingress) { - u8 
local_port; + u16 local_port; u8 num_rec; int i; @@ -2212,7 +2213,7 @@ static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg, } void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, - u8 local_port, void *priv) + u16 local_port, void *priv) { struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; @@ -2236,7 +2237,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, netif_receive_skb(skb); } -static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port, void *priv) { skb->offload_fwd_mark = 1; @@ -2244,7 +2245,7 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, } static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, - u8 local_port, void *priv) + u16 local_port, void *priv) { skb->offload_l3_fwd_mark = 1; skb->offload_fwd_mark = 1; @@ -2252,7 +2253,7 @@ static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, } void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port) + u16 local_port) { mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port); } @@ -2755,6 +2756,140 @@ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) mutex_destroy(&mlxsw_sp->parsing.lock); } +struct mlxsw_sp_ipv6_addr_node { + struct in6_addr key; + struct rhash_head ht_node; + u32 kvdl_index; + refcount_t refcount; +}; + +static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key), + .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node), + .key_len = sizeof(struct in6_addr), + .automatic_shrinking = true, +}; + +static int +mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6, + u32 *p_kvdl_index) +{ + struct mlxsw_sp_ipv6_addr_node *node; + char rips_pl[MLXSW_REG_RIPS_LEN]; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + p_kvdl_index); + if (err) + return err; + + mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); + if (err) + goto err_rips_write; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + err = -ENOMEM; + goto err_node_alloc; + } + + node->key = *addr6; + node->kvdl_index = *p_kvdl_index; + refcount_set(&node->refcount, 1); + + err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, + &node->ht_node, + mlxsw_sp_ipv6_addr_ht_params); + if (err) + goto err_rhashtable_insert; + + return 0; + +err_rhashtable_insert: + kfree(node); +err_node_alloc: +err_rips_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + *p_kvdl_index); + return err; +} + +static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipv6_addr_node *node) +{ + u32 kvdl_index = node->kvdl_index; + + rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node, + mlxsw_sp_ipv6_addr_ht_params); + kfree(node); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + kvdl_index); +} + +int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index) +{ + struct mlxsw_sp_ipv6_addr_node *node; + int err = 0; + + mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); + node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, + mlxsw_sp_ipv6_addr_ht_params); + if (node) { + refcount_inc(&node->refcount); + *p_kvdl_index = 
node->kvdl_index; + goto out_unlock; + } + + err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index); + +out_unlock: + mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); + return err; +} + +void +mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6) +{ + struct mlxsw_sp_ipv6_addr_node *node; + + mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock); + node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6, + mlxsw_sp_ipv6_addr_ht_params); + if (WARN_ON(!node)) + goto out_unlock; + + if (!refcount_dec_and_test(&node->refcount)) + goto out_unlock; + + mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node); + +out_unlock: + mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock); +} + +static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht, + &mlxsw_sp_ipv6_addr_ht_params); + if (err) + return err; + + mutex_init(&mlxsw_sp->ipv6_addr_ht_lock); + return 0; +} + +static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp) +{ + mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock); + rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht); +} + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct netlink_ext_ack *extack) @@ -2843,6 +2978,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_afa_init; } + err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n"); + goto err_ipv6_addr_ht_init; + } + err = mlxsw_sp_nve_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n"); @@ -2944,6 +3085,8 @@ err_router_init: err_acl_init: mlxsw_sp_nve_fini(mlxsw_sp); err_nve_init: + mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); +err_ipv6_addr_ht_init: mlxsw_sp_afa_fini(mlxsw_sp); err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); @@ -3013,6 +3156,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; @@ -3042,6 +3186,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; @@ -3058,6 +3203,36 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); } +static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops; + mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops; + mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; + mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; + mlxsw_sp->sb_vals = 
&mlxsw_sp2_sb_vals; + mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops; + mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; + mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; + mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; + mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; + mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; + mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; + mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; + mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); +} + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -3075,6 +3250,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_nve_fini(mlxsw_sp); + mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -3290,10 +3466,10 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core) u8 max_rif_mac_profiles; if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES)) - return -EIO; - - max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, - MAX_RIF_MAC_PROFILES); + max_rif_mac_profiles = 1; + else + max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core, + MAX_RIF_MAC_PROFILES); devlink_resource_size_params_init(&size_params, max_rif_mac_profiles, max_rif_mac_profiles, 1, DEVLINK_RESOURCE_UNIT_ENTRY); @@ -3336,7 +3512,7 @@ err_resources_rif_mac_profile_register: err_policer_resources_register: err_resources_counter_register: err_resources_span_register: - devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); + devlink_resources_unregister(priv_to_devlink(mlxsw_core)); return err; } @@ -3370,7 +3546,7 @@ err_resources_rif_mac_profile_register: err_policer_resources_register: err_resources_counter_register: err_resources_span_register: - devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL); + devlink_resources_unregister(priv_to_devlink(mlxsw_core)); return err; } @@ -3486,7 +3662,7 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) } static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -3616,6 +3792,45 @@ static struct mlxsw_driver mlxsw_sp3_driver = { .temp_warn_enabled = true, }; +static struct mlxsw_driver mlxsw_sp4_driver = { + .kind = mlxsw_sp4_driver_name, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp4_init, + .fini = mlxsw_sp_fini, + .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, + .trap_init = mlxsw_sp_trap_init, + .trap_fini = mlxsw_sp_trap_fini, + .trap_action_set = mlxsw_sp_trap_action_set, + .trap_group_init = mlxsw_sp_trap_group_init, + .trap_group_set = mlxsw_sp_trap_group_set, + .trap_policer_init = 
mlxsw_sp_trap_policer_init, + .trap_policer_fini = mlxsw_sp_trap_policer_fini, + .trap_policer_set = mlxsw_sp_trap_policer_set, + .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .resources_register = mlxsw_sp2_resources_register, + .params_register = mlxsw_sp2_params_register, + .params_unregister = mlxsw_sp2_params_unregister, + .ptp_transmitted = mlxsw_sp_ptp_transmitted, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp2_config_profile, + .res_query_enabled = true, + .fw_fatal_enabled = true, + .temp_warn_enabled = true, +}; + bool mlxsw_sp_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; @@ -4783,6 +4998,16 @@ static struct pci_driver mlxsw_sp3_pci_driver = { .id_table = mlxsw_sp3_pci_id_table, }; +static const struct pci_device_id mlxsw_sp4_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sp4_pci_driver = { + .name = mlxsw_sp4_driver_name, + .id_table = mlxsw_sp4_pci_id_table, +}; + static int __init mlxsw_sp_module_init(void) { int err; @@ -4802,6 +5027,10 @@ static int __init mlxsw_sp_module_init(void) if (err) goto err_sp3_core_driver_register; + err = mlxsw_core_driver_register(&mlxsw_sp4_driver); + if (err) + goto err_sp4_core_driver_register; + err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); if (err) goto err_sp1_pci_driver_register; @@ -4814,13 +5043,21 @@ static int __init mlxsw_sp_module_init(void) if (err) goto err_sp3_pci_driver_register; + err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver); + if (err) + goto err_sp4_pci_driver_register; + return 0; +err_sp4_pci_driver_register: + mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); err_sp3_pci_driver_register: mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); err_sp2_pci_driver_register: mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); err_sp1_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp4_driver); +err_sp4_core_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp3_driver); err_sp3_core_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp2_driver); @@ -4834,9 +5071,11 @@ err_sp1_core_driver_register: static void __exit mlxsw_sp_module_exit(void) { + mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver); mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver); mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); + mlxsw_core_driver_unregister(&mlxsw_sp4_driver); mlxsw_core_driver_unregister(&mlxsw_sp3_driver); mlxsw_core_driver_unregister(&mlxsw_sp2_driver); mlxsw_core_driver_unregister(&mlxsw_sp1_driver); @@ -4853,6 +5092,7 @@ MODULE_DESCRIPTION("Mellanox Spectrum driver"); MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); +MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 32fdd37657dd..bb2442e1f705 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -190,6 +190,7 @@ struct mlxsw_sp { const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops; const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; + const struct 
mlxsw_sp_acl_bf_ops *acl_bf_ops; const struct mlxsw_sp_nve_ops **nve_ops_arr; const struct mlxsw_sp_sb_vals *sb_vals; const struct mlxsw_sp_sb_ops *sb_ops; @@ -203,6 +204,8 @@ struct mlxsw_sp { const struct mlxsw_listener *listeners; size_t listeners_count; u32 lowest_shaper_bs; + struct rhashtable ipv6_addr_ht; + struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */ }; struct mlxsw_sp_ptp_ops { @@ -217,13 +220,13 @@ struct mlxsw_sp_ptp_ops { * is responsible for freeing the passed-in SKB. */ void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); /* Notify a driver that a timestamped packet was transmitted. Driver * is responsible for freeing the passed-in SKB. */ void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, struct hwtstamp_config *config); @@ -261,7 +264,7 @@ enum mlxsw_sp_sample_trigger_type { struct mlxsw_sp_sample_trigger { enum mlxsw_sp_sample_trigger_type type; - u8 local_port; /* Reserved when trigger type is not ingress / egress. */ + u16 local_port; /* Reserved when trigger type is not ingress / egress. */ }; struct mlxsw_sp_sample_params { @@ -308,7 +311,7 @@ struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sp *mlxsw_sp; - u8 local_port; + u16 local_port; u8 lagged:1, split:1; u16 pvid; @@ -370,7 +373,7 @@ struct mlxsw_sp_port_type_speed_ops { u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width, const struct ethtool_link_ksettings *cmd); void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, bool autoneg); + u16 local_port, u32 proto_admin, bool autoneg); void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload, u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, @@ -441,7 +444,7 @@ static inline struct mlxsw_sp_port * mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) { struct mlxsw_sp_port *mlxsw_sp_port; - u8 local_port; + u16 local_port; local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core, lag_id, port_index); @@ -587,6 +590,11 @@ mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_sample_trigger *trigger); +int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index); +void +mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6); extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals; extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals; @@ -621,9 +629,9 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier; /* spectrum.c */ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb, - u8 local_port, void *priv); + u16 local_port, void *priv); void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, @@ -729,7 +737,7 @@ void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); -u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); +u16 
mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, enum mlxsw_sp_l3proto ul_proto, const union mlxsw_sp_l3addr *ul_sip, @@ -1099,6 +1107,11 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops; /* spectrum_acl_flex_keys.c */ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; +extern const struct mlxsw_afk_ops mlxsw_sp4_afk_ops; + +/* spectrum_acl_bloom_filter.c */ +extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops; +extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops; /* spectrum_matchall.c */ struct mlxsw_sp_mall_ops { @@ -1222,7 +1235,7 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid); void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid, const struct net_device *nve_dev); int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, - enum mlxsw_sp_flood_type packet_type, u8 local_port, + enum mlxsw_sp_flood_type packet_type, u16 local_port, bool member); int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); @@ -1310,6 +1323,17 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, enum mlxsw_sp_l3proto proto, union mlxsw_sp_l3addr *addr); +int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index); +void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6); +int +mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index, + const struct in6_addr *new_addr6); +void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index); int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c index a11d911302f1..e4f4cded2b6f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -45,8 +45,8 @@ static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp, } static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = { - MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, MLXSW_AFK_ELEMENT_SRC_IP_0_31, MLXSW_AFK_ELEMENT_DST_IP_0_31, }; @@ -89,8 +89,8 @@ static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam) } static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = { - MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, MLXSW_AFK_ELEMENT_SRC_IP_96_127, MLXSW_AFK_ELEMENT_SRC_IP_64_95, MLXSW_AFK_ELEMENT_SRC_IP_32_63, @@ -189,10 +189,10 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule, rulei = mlxsw_sp_acl_rule_rulei(rule); rulei->priority = priority; - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB, key->vrid, GENMASK(7, 0)); mlxsw_sp_acl_rulei_keymask_u32(rulei, - MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, key->vrid >> 8, GENMASK(2, 0)); switch (key->proto) { case MLXSW_SP_L3_PROTO_IPV4: diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 67cedfa76f78..70c11bfac08f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -406,7 +406,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port; - u8 local_port; + u16 local_port; bool in_port; if (out_dev) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index dbd3bebf11ec..e2aced7ab454 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -17,9 +17,9 @@ struct mlxsw_sp_acl_bf { }; /* Bloom filter uses a crc-16 hash over chunks of data which contain 4 key - * blocks, eRP ID and region ID. In Spectrum-2, region key is combined of up to - * 12 key blocks, so there can be up to 3 chunks in the Bloom filter key, - * depending on the actual number of key blocks used in the region. + * blocks, eRP ID and region ID. In Spectrum-2 and above, region key is combined + * of up to 12 key blocks, so there can be up to 3 chunks in the Bloom filter + * key, depending on the actual number of key blocks used in the region. * The layout of the Bloom filter key is as follows: * * +-------------------------+------------------------+------------------------+ @@ -27,7 +27,9 @@ struct mlxsw_sp_acl_bf { * +-------------------------+------------------------+------------------------+ */ #define MLXSW_BLOOM_KEY_CHUNKS 3 -#define MLXSW_BLOOM_KEY_LEN 69 + +/* Spectrum-2 and Spectrum-3 chunks */ +#define MLXSW_SP2_BLOOM_KEY_LEN 69 /* Each chunk size is 23 bytes. 18 bytes of it contain 4 key blocks, each is * 36 bits, 2 bytes which hold eRP ID and region ID, and 3 bytes of zero @@ -42,31 +44,21 @@ struct mlxsw_sp_acl_bf { * | 0 | region ID | eRP ID | 4 Key blocks (18 Bytes) | * +---------+-----------+----------+-----------------------------------+ */ -#define MLXSW_BLOOM_CHUNK_PAD_BYTES 3 -#define MLXSW_BLOOM_CHUNK_KEY_BYTES 18 -#define MLXSW_BLOOM_KEY_CHUNK_BYTES 23 +#define MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES 3 +#define MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES 18 +#define MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES 23 /* The offset of the key block within a chunk is 5 bytes as it comes after * 3 bytes of zero padding and 16 bits of region ID and eRP ID. */ -#define MLXSW_BLOOM_CHUNK_KEY_OFFSET 5 +#define MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET 5 -/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8, - * and we need to populate it with 4 key blocks copied from the entry encoded - * key. Since the encoded key contains a padding, key block 11 starts at offset - * 2. block 7 that is used in chunk 1 starts at offset 20 as 4 key blocks take - * 18 bytes. - * This array defines key offsets for easy access when copying key blocks from - * entry key to Bloom filter chunk. - */ -static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38}; - -/* This table is just the CRC of each possible byte. It is - * computed, Msbit first, for the Bloom filter polynomial - * which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and +/* This table is just the CRC of each possible byte which is used for + * Spectrum-{2-3}. It is computed, Msbit first, for the Bloom filter + * polynomial which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and * the implicit x^16). 
*/ -static const u16 mlxsw_sp_acl_bf_crc_tab[256] = { +static const u16 mlxsw_sp2_acl_bf_crc16_tab[256] = { 0x0000, 0x8529, 0x8f7b, 0x0a52, 0x9bdf, 0x1ef6, 0x14a4, 0x918d, 0xb297, 0x37be, 0x3dec, 0xb8c5, 0x2948, 0xac61, 0xa633, 0x231a, 0xe007, 0x652e, 0x6f7c, 0xea55, 0x7bd8, 0xfef1, 0xf4a3, 0x718a, @@ -101,24 +93,146 @@ static const u16 mlxsw_sp_acl_bf_crc_tab[256] = { 0x0c4c, 0x8965, 0x8337, 0x061e, 0x9793, 0x12ba, 0x18e8, 0x9dc1, }; -static u16 mlxsw_sp_acl_bf_crc_byte(u16 crc, u8 c) +/* Spectrum-4 chunks */ +#define MLXSW_SP4_BLOOM_KEY_LEN 60 + +/* In Spectrum-4, there is no padding. Each chunk size is 20 bytes. + * 18 bytes of it contain 4 key blocks, each 36 bits wide, and 2 bytes which hold + * eRP ID and region ID. + * The layout of each chunk is as follows: + * + * +----------------------+-----------------------------------+ + * | 2 bytes | 18 bytes | + * +-----------+----------+-----------------------------------+ + * | 157:148 | 147:144 | 143:0 | + * +---------+-----------+----------+-------------------------+ + * | region ID | eRP ID | 4 Key blocks (18 Bytes) | + * +-----------+----------+-----------------------------------+ + */ + +#define MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES 0 +#define MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES 18 +#define MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES 20 + +/* The offset of the key block within a chunk is 2 bytes as it comes after + * 16 bits of region ID and eRP ID. + */ +#define MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET 2 + +/* For Spectrum-4, two hash functions are used, CRC-10 and CRC-6 based. + * The result is a combination of the two calculations - + * the 6-bit column is the MSB (result of CRC-6), + * the 10-bit row is the LSB (result of CRC-10). + */ + +/* This table is just the CRC of each possible byte which is used for + * Spectrum-4. It is computed, Msbit first, for the Bloom filter + * polynomial which is 0x1b (1 + x^1 + x^3 + x^4 and the implicit x^10).
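The byte-indexed tables in this file - CRC-16/0x8529 for Spectrum-{2,3} above, CRC-10/0x1b and CRC-6/0x2d for Spectrum-4 below - are precomputed so that each hashed byte costs one shift plus one table lookup. A standalone sketch of how an entry of such an MSbit-first table can be derived from its polynomial; the polynomials and expected values are taken from this patch, while the helper itself is illustrative and not part of the driver:

#include <assert.h>

/* Compute one table entry for an MSbit-first CRC of the given width by
 * keeping the working register aligned to the top of 16 bits.
 */
static unsigned int crc_table_entry(unsigned int byte, unsigned int poly,
                                    int width)
{
        unsigned int poly_hi = poly << (16 - width);
        unsigned int crc = byte << 8;   /* byte occupies bits 15:8 */
        int i;

        for (i = 0; i < 8; i++)
                crc = (crc & 0x8000) ? ((crc << 1) ^ poly_hi) & 0xffff :
                                       (crc << 1) & 0xffff;
        return crc >> (16 - width);
}

int main(void)
{
        assert(crc_table_entry(0x01, 0x8529, 16) == 0x8529); /* crc16_tab[1] */
        assert(crc_table_entry(0x08, 0x8529, 16) == 0xb297); /* crc16_tab[8] */
        assert(crc_table_entry(0x01, 0x1b, 10) == 0x001b);   /* crc10_tab[1] */
        assert(crc_table_entry(0x01, 0x2d, 6) == 0x2d);      /* crc6_tab[1] */
        return 0;
}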
+ */ +static const u16 mlxsw_sp4_acl_bf_crc10_tab[256] = { +0x0000, 0x001b, 0x0036, 0x002d, 0x006c, 0x0077, 0x005a, 0x0041, +0x00d8, 0x00c3, 0x00ee, 0x00f5, 0x00b4, 0x00af, 0x0082, 0x0099, +0x01b0, 0x01ab, 0x0186, 0x019d, 0x01dc, 0x01c7, 0x01ea, 0x01f1, +0x0168, 0x0173, 0x015e, 0x0145, 0x0104, 0x011f, 0x0132, 0x0129, +0x0360, 0x037b, 0x0356, 0x034d, 0x030c, 0x0317, 0x033a, 0x0321, +0x03b8, 0x03a3, 0x038e, 0x0395, 0x03d4, 0x03cf, 0x03e2, 0x03f9, +0x02d0, 0x02cb, 0x02e6, 0x02fd, 0x02bc, 0x02a7, 0x028a, 0x0291, +0x0208, 0x0213, 0x023e, 0x0225, 0x0264, 0x027f, 0x0252, 0x0249, +0x02db, 0x02c0, 0x02ed, 0x02f6, 0x02b7, 0x02ac, 0x0281, 0x029a, +0x0203, 0x0218, 0x0235, 0x022e, 0x026f, 0x0274, 0x0259, 0x0242, +0x036b, 0x0370, 0x035d, 0x0346, 0x0307, 0x031c, 0x0331, 0x032a, +0x03b3, 0x03a8, 0x0385, 0x039e, 0x03df, 0x03c4, 0x03e9, 0x03f2, +0x01bb, 0x01a0, 0x018d, 0x0196, 0x01d7, 0x01cc, 0x01e1, 0x01fa, +0x0163, 0x0178, 0x0155, 0x014e, 0x010f, 0x0114, 0x0139, 0x0122, +0x000b, 0x0010, 0x003d, 0x0026, 0x0067, 0x007c, 0x0051, 0x004a, +0x00d3, 0x00c8, 0x00e5, 0x00fe, 0x00bf, 0x00a4, 0x0089, 0x0092, +0x01ad, 0x01b6, 0x019b, 0x0180, 0x01c1, 0x01da, 0x01f7, 0x01ec, +0x0175, 0x016e, 0x0143, 0x0158, 0x0119, 0x0102, 0x012f, 0x0134, +0x001d, 0x0006, 0x002b, 0x0030, 0x0071, 0x006a, 0x0047, 0x005c, +0x00c5, 0x00de, 0x00f3, 0x00e8, 0x00a9, 0x00b2, 0x009f, 0x0084, +0x02cd, 0x02d6, 0x02fb, 0x02e0, 0x02a1, 0x02ba, 0x0297, 0x028c, +0x0215, 0x020e, 0x0223, 0x0238, 0x0279, 0x0262, 0x024f, 0x0254, +0x037d, 0x0366, 0x034b, 0x0350, 0x0311, 0x030a, 0x0327, 0x033c, +0x03a5, 0x03be, 0x0393, 0x0388, 0x03c9, 0x03d2, 0x03ff, 0x03e4, +0x0376, 0x036d, 0x0340, 0x035b, 0x031a, 0x0301, 0x032c, 0x0337, +0x03ae, 0x03b5, 0x0398, 0x0383, 0x03c2, 0x03d9, 0x03f4, 0x03ef, +0x02c6, 0x02dd, 0x02f0, 0x02eb, 0x02aa, 0x02b1, 0x029c, 0x0287, +0x021e, 0x0205, 0x0228, 0x0233, 0x0272, 0x0269, 0x0244, 0x025f, +0x0016, 0x000d, 0x0020, 0x003b, 0x007a, 0x0061, 0x004c, 0x0057, +0x00ce, 0x00d5, 0x00f8, 0x00e3, 0x00a2, 0x00b9, 0x0094, 0x008f, +0x01a6, 0x01bd, 0x0190, 0x018b, 0x01ca, 0x01d1, 0x01fc, 0x01e7, +0x017e, 0x0165, 0x0148, 0x0153, 0x0112, 0x0109, 0x0124, 0x013f, +}; + +/* This table is just the CRC of each possible byte which is used for + * Spectrum-4. It is computed, Msbit first, for the Bloom filter + * polynomial which is 0x2d (1 + x^2 + x^3 + x^5 and the implicit x^6).
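One subtlety of the Spectrum-4 scheme shown further below: the CRC-6 accumulator keeps its 6-bit value in bits 7:2 of the byte-wide register (hence the << 2 and & 0xfc in mlxsw_sp4_acl_bf_crc6_byte), so the final composition first shifts the column value down by two before placing it above the 10-bit row. A worked example of the composition, with invented result values:

#include <assert.h>

int main(void)
{
        unsigned int crc_col = 0x2a;  /* final 6-bit CRC-6 value (illustrative) */
        unsigned int crc_row = 0x1b3; /* final 10-bit CRC-10 value (illustrative) */

        /* 6-bit column in the MSBs, 10-bit row in the LSBs */
        assert(((crc_col << 10) | crc_row) == 0xa9b3);
        return 0;
}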
+ */ +static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = { +0x00, 0x2d, 0x37, 0x1a, 0x03, 0x2e, 0x34, 0x19, +0x06, 0x2b, 0x31, 0x1c, 0x05, 0x28, 0x32, 0x1f, +0x0c, 0x21, 0x3b, 0x16, 0x0f, 0x22, 0x38, 0x15, +0x0a, 0x27, 0x3d, 0x10, 0x09, 0x24, 0x3e, 0x13, +0x18, 0x35, 0x2f, 0x02, 0x1b, 0x36, 0x2c, 0x01, +0x1e, 0x33, 0x29, 0x04, 0x1d, 0x30, 0x2a, 0x07, +0x14, 0x39, 0x23, 0x0e, 0x17, 0x3a, 0x20, 0x0d, +0x12, 0x3f, 0x25, 0x08, 0x11, 0x3c, 0x26, 0x0b, +0x30, 0x1d, 0x07, 0x2a, 0x33, 0x1e, 0x04, 0x29, +0x36, 0x1b, 0x01, 0x2c, 0x35, 0x18, 0x02, 0x2f, +0x3c, 0x11, 0x0b, 0x26, 0x3f, 0x12, 0x08, 0x25, +0x3a, 0x17, 0x0d, 0x20, 0x39, 0x14, 0x0e, 0x23, +0x28, 0x05, 0x1f, 0x32, 0x2b, 0x06, 0x1c, 0x31, +0x2e, 0x03, 0x19, 0x34, 0x2d, 0x00, 0x1a, 0x37, +0x24, 0x09, 0x13, 0x3e, 0x27, 0x0a, 0x10, 0x3d, +0x22, 0x0f, 0x15, 0x38, 0x21, 0x0c, 0x16, 0x3b, +0x0d, 0x20, 0x3a, 0x17, 0x0e, 0x23, 0x39, 0x14, +0x0b, 0x26, 0x3c, 0x11, 0x08, 0x25, 0x3f, 0x12, +0x01, 0x2c, 0x36, 0x1b, 0x02, 0x2f, 0x35, 0x18, +0x07, 0x2a, 0x30, 0x1d, 0x04, 0x29, 0x33, 0x1e, +0x15, 0x38, 0x22, 0x0f, 0x16, 0x3b, 0x21, 0x0c, +0x13, 0x3e, 0x24, 0x09, 0x10, 0x3d, 0x27, 0x0a, +0x19, 0x34, 0x2e, 0x03, 0x1a, 0x37, 0x2d, 0x00, +0x1f, 0x32, 0x28, 0x05, 0x1c, 0x31, 0x2b, 0x06, +0x3d, 0x10, 0x0a, 0x27, 0x3e, 0x13, 0x09, 0x24, +0x3b, 0x16, 0x0c, 0x21, 0x38, 0x15, 0x0f, 0x22, +0x31, 0x1c, 0x06, 0x2b, 0x32, 0x1f, 0x05, 0x28, +0x37, 0x1a, 0x00, 0x2d, 0x34, 0x19, 0x03, 0x2e, +0x25, 0x08, 0x12, 0x3f, 0x26, 0x0b, 0x11, 0x3c, +0x23, 0x0e, 0x14, 0x39, 0x20, 0x0d, 0x17, 0x3a, +0x29, 0x04, 0x1e, 0x33, 0x2a, 0x07, 0x1d, 0x30, +0x2f, 0x02, 0x18, 0x35, 0x2c, 0x01, 0x1b, 0x36, +}; + +/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8, + * and we need to populate it with 4 key blocks copied from the entry encoded + * key. The original key layout is the same for Spectrum-{2,3,4}. + * Since the encoded key contains 2 bytes of padding, key block 11 starts at + * offset 2. Block 7, which is used in chunk 1, starts at offset 20, as 4 key blocks + * take 18 bytes. See 'MLXSW_SP2_AFK_BLOCK_LAYOUT' for more details. + * This array defines key offsets for easy access when copying key blocks from + * entry key to Bloom filter chunk.
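The {2, 20, 38} source offsets are shared by the Spectrum-2 and Spectrum-4 encode paths; only the destination chunk geometry (pad bytes, key offset, chunk length) differs per ASIC. They follow directly from the encoded key layout described above: 2 bytes of padding, then consecutive 18-byte groups of 4 key blocks. A trivial standalone check of that arithmetic (the macro names here are illustrative, not from the driver):

#include <assert.h>

#define ENC_KEY_PAD_BYTES      2  /* padding at the start of the encoded key */
#define KEY_BLOCK_GROUP_BYTES 18  /* 4 key blocks * 36 bits = 144 bits */

int main(void)
{
        static const unsigned char chunk_key_offsets[3] = {2, 20, 38};
        int i;

        for (i = 0; i < 3; i++)
                assert(chunk_key_offsets[i] ==
                       ENC_KEY_PAD_BYTES + i * KEY_BLOCK_GROUP_BYTES);
        return 0;
}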
+ */ +static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38}; + +static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c) { - return (crc << 8) ^ mlxsw_sp_acl_bf_crc_tab[(crc >> 8) ^ c]; + return (crc << 8) ^ mlxsw_sp2_acl_bf_crc16_tab[(crc >> 8) ^ c]; } -static u16 mlxsw_sp_acl_bf_crc(const u8 *buffer, size_t len) +static u16 mlxsw_sp2_acl_bf_crc(const u8 *buffer, size_t len) { u16 crc = 0; while (len--) - crc = mlxsw_sp_acl_bf_crc_byte(crc, *buffer++); + crc = mlxsw_sp2_acl_bf_crc16_byte(crc, *buffer++); return crc; } static void -mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_atcam_entry *aentry, - char *output, u8 *len) +__mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + char *output, u8 *len, u8 max_chunks, u8 pad_bytes, + u8 key_offset, u8 chunk_key_len, u8 chunk_len) { struct mlxsw_afk_key_info *key_info = aregion->region->key_info; u8 chunk_index, chunk_count, block_count; @@ -129,37 +243,168 @@ mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, chunk_count = 1 + ((block_count - 1) >> 2); erp_region_id = cpu_to_be16(aentry->ht_key.erp_id | (aregion->region->id << 4)); - for (chunk_index = MLXSW_BLOOM_KEY_CHUNKS - chunk_count; - chunk_index < MLXSW_BLOOM_KEY_CHUNKS; chunk_index++) { - memset(chunk, 0, MLXSW_BLOOM_CHUNK_PAD_BYTES); - memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id, + for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks; + chunk_index++) { + memset(chunk, 0, pad_bytes); + memcpy(chunk + pad_bytes, &erp_region_id, sizeof(erp_region_id)); - memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET, + memcpy(chunk + key_offset, &aentry->enc_key[chunk_key_offsets[chunk_index]], - MLXSW_BLOOM_CHUNK_KEY_BYTES); - chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES; + chunk_key_len); + chunk += chunk_len; } - *len = chunk_count * MLXSW_BLOOM_KEY_CHUNK_BYTES; + *len = chunk_count * chunk_len; +} + +static void +mlxsw_sp2_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + char *output, u8 *len) +{ + __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len, + MLXSW_BLOOM_KEY_CHUNKS, + MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES, + MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET, + MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES, + MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES); } static unsigned int -mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf, - unsigned int erp_bank, - unsigned int bf_index) +mlxsw_sp2_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry) { - return erp_bank * bf->bank_size + bf_index; + char bf_key[MLXSW_SP2_BLOOM_KEY_LEN]; + u8 bf_size; + + mlxsw_sp2_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size); + return mlxsw_sp2_acl_bf_crc(bf_key, bf_size); +} + +static u16 mlxsw_sp4_acl_bf_crc10_byte(u16 crc, u8 c) +{ + u8 index = ((crc >> 2) ^ c) & 0xff; + + return ((crc << 8) ^ mlxsw_sp4_acl_bf_crc10_tab[index]) & 0x3ff; +} + +static u16 mlxsw_sp4_acl_bf_crc6_byte(u16 crc, u8 c) +{ + u8 index = (crc ^ c) & 0xff; + + return ((crc << 6) ^ (mlxsw_sp4_acl_bf_crc6_tab[index] << 2)) & 0xfc; +} + +static u16 mlxsw_sp4_acl_bf_crc(const u8 *buffer, size_t len) +{ + u16 crc_row = 0, crc_col = 0; + + while (len--) { + crc_row = mlxsw_sp4_acl_bf_crc10_byte(crc_row, *buffer); + crc_col = mlxsw_sp4_acl_bf_crc6_byte(crc_col, *buffer); + buffer++; + } + + crc_col >>= 2; + + /* 6 bit column are MSB, 10 bit row are LSB */ + return (crc_col << 
10) | crc_row; +} + +static void right_shift_array(char *arr, u8 len, u8 shift_bits) +{ + u8 byte_mask = 0xff >> shift_bits; + int i; + + if (WARN_ON(!shift_bits || shift_bits >= 8)) + return; + + for (i = len - 1; i >= 0; i--) { + /* The first iteration looks like out-of-bounds access, + * but actually references a buffer that the array is shifted + * into. This move is legal as we never send the last chunk to + * this function. + */ + arr[i + 1] &= byte_mask; + arr[i + 1] |= arr[i] << (8 - shift_bits); + arr[i] = arr[i] >> shift_bits; + } +} + +static void mlxsw_sp4_bf_key_shift_chunks(u8 chunk_count, char *output) +{ + /* The chunks are supposed to be contiguous, with no padding. + * Since region ID and eRP ID use 14 bits, and not fully 2 bytes, + * and in Spectrum-4 there is no padding, it is necessary to shift some + * chunks 2 bits right. + */ + switch (chunk_count) { + case 2: + /* The chunks are copied as follows: + * +-------------+-----------------+ + * | Chunk 0 | Chunk 1 | + * | IDs | keys |(**) IDs | keys | + * +-------------+-----------------+ + * In (**), there are two unused bits; therefore, chunk 0 needs + * to be shifted two bits right. + */ + right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2); + break; + case 3: + /* The chunks are copied as follows: + * +-------------+-----------------+-----------------+ + * | Chunk 0 | Chunk 1 | Chunk 2 | + * | IDs | keys |(**) IDs | keys |(**) IDs | keys | + * +-------------+-----------------+-----------------+ + * In (**), there are two unused bits; therefore, chunk 1 needs + * to be shifted two bits right and chunk 0 needs to be shifted + * four bits right. + */ + right_shift_array(output + MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, + MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2); + right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 4); + break; + default: + WARN_ON(chunk_count > MLXSW_BLOOM_KEY_CHUNKS); + } +} + +static void +mlxsw_sp4_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + char *output, u8 *len) +{ + struct mlxsw_afk_key_info *key_info = aregion->region->key_info; + u8 block_count = mlxsw_afk_key_info_blocks_count_get(key_info); + u8 chunk_count = 1 + ((block_count - 1) >> 2); + + __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len, + MLXSW_BLOOM_KEY_CHUNKS, + MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES, + MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET, + MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES, + MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES); + mlxsw_sp4_bf_key_shift_chunks(chunk_count, output); } static unsigned int -mlxsw_sp_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf, - struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_atcam_entry *aentry) +mlxsw_sp4_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry) { - char bf_key[MLXSW_BLOOM_KEY_LEN]; + char bf_key[MLXSW_SP4_BLOOM_KEY_LEN] = {}; u8 bf_size; - mlxsw_sp_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size); - return mlxsw_sp_acl_bf_crc(bf_key, bf_size); + mlxsw_sp4_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size); + return mlxsw_sp4_acl_bf_crc(bf_key, bf_size); +} + +static unsigned int +mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf, + unsigned int erp_bank, + unsigned int bf_index) +{ + return erp_bank * bf->bank_size + bf_index; } int @@ -176,7 +421,7 @@ mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp, mutex_lock(&bf->lock); - bf_index =
mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry); rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank, bf_index); @@ -219,7 +464,7 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp, mutex_lock(&bf->lock); - bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry); + bf_index = mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry); rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank, bf_index); @@ -267,3 +512,11 @@ void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf) mutex_destroy(&bf->lock); kfree(bf); } + +const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops = { + .index_get = mlxsw_sp2_acl_bf_index_get, +}; + +const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops = { + .index_get = mlxsw_sp4_acl_bf_index_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index c72aa38424dc..50806594d977 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -83,7 +83,7 @@ static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, } static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, - u8 local_port) + u16 local_port) { struct mlxsw_sp *mlxsw_sp = priv; char ppbs_pl[MLXSW_REG_PPBS_LEN]; @@ -132,7 +132,7 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index) } static int -mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, +mlxsw_sp_act_mirror_add(void *priv, u16 local_in_port, const struct net_device *out_dev, bool ingress, int *p_span_id) { @@ -159,7 +159,7 @@ err_analyzed_port_get: } static void -mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) +mlxsw_sp_act_mirror_del(void *priv, u16 local_in_port, int span_id, bool ingress) { struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = priv; @@ -192,7 +192,7 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index) policer_index); } -static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port, +static int mlxsw_sp1_act_sampler_add(void *priv, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, int *p_span_id, @@ -202,7 +202,7 @@ static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port, return -EOPNOTSUPP; } -static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id, +static void mlxsw_sp1_act_sampler_del(void *priv, u16 local_port, int span_id, bool ingress) { WARN_ON_ONCE(1); @@ -224,7 +224,7 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .sampler_del = mlxsw_sp1_act_sampler_del, }; -static int mlxsw_sp2_act_sampler_add(void *priv, u8 local_port, +static int mlxsw_sp2_act_sampler_add(void *priv, u16 local_port, struct psample_group *psample_group, u32 rate, u32 trunc_size, bool truncate, bool ingress, int *p_span_id, @@ -272,7 +272,7 @@ err_span_agent_get: return err; } -static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id, +static void mlxsw_sp2_act_sampler_del(void *priv, u16 local_port, int span_id, bool ingress) { struct mlxsw_sp_sample_trigger trigger = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index 279c241f76f0..00c32320f891 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -168,8 +168,8 @@ static struct 
mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_7, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_8_10, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8), + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { @@ -311,3 +311,45 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { .encode_block = mlxsw_sp2_afk_encode_block, .clear_block = mlxsw_sp2_afk_clear_block, }; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12), + MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */ +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8), + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), +}; + +static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2), + MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3), + MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4), + MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b), + MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), + MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), + MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), + MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b), + MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), + MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), + MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b), + MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3), + MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4), + MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5), + MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0), + MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), +}; + +const struct mlxsw_afk_ops mlxsw_sp4_afk_ops = { + .blocks = mlxsw_sp4_afk_blocks, + .blocks_count = ARRAY_SIZE(mlxsw_sp4_afk_blocks), + .encode_block = mlxsw_sp2_afk_encode_block, + .clear_block = mlxsw_sp2_afk_clear_block, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index a41df10ade9b..edbbc89e7a71 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -287,6 +287,12 @@ void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_bf; +struct mlxsw_sp_acl_bf_ops { + unsigned int (*index_get)(struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry); +}; + int mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_bf *bf, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index d78cf5a7220a..98f26f596e30 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -160,7 +160,7 @@ static bool 
mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) } static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 pg_buff, + u16 local_port, u8 pg_buff, enum mlxsw_reg_sbxx_dir dir) { struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port]; @@ -173,7 +173,7 @@ static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u16 pool_index) + u16 local_port, u16 pool_index) { return &mlxsw_sp->sb->ports[local_port].pms[pool_index]; } @@ -202,7 +202,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, return 0; } -static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, u8 pg_buff, u32 min_buff, u32 max_buff, bool infi_max, u16 pool_index) { @@ -232,7 +232,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, return 0; } -static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, u16 pool_index, u32 min_buff, u32 max_buff) { const struct mlxsw_sp_sb_pool_des *des = @@ -253,7 +253,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, return 0; } -static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = @@ -279,7 +279,7 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max); } -static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = @@ -919,7 +919,7 @@ mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index) return pr->mode == MLXSW_REG_SBPR_MODE_STATIC; } -static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port, enum mlxsw_reg_sbxx_dir dir, const struct mlxsw_sp_sb_cm *cms, size_t cms_len) @@ -1037,7 +1037,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = { MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), }; -static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port, const struct mlxsw_sp_sb_pm *pms, bool skip_ingress) { @@ -1416,7 +1416,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); @@ -1432,7 +1432,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u32 max_buff; int err; @@ -1458,7 +1458,7 @@ int 
mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, @@ -1479,7 +1479,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; const struct mlxsw_sp_sb_cm *cm; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; @@ -1526,7 +1526,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_sr_occ_query_cb_ctx { u8 masked_count; - u8 local_port_1; + u16 local_port_1; }; static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, @@ -1536,7 +1536,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; u8 masked_count; - u8 local_port; + u16 local_port; int rec_index = 0; struct mlxsw_sp_sb_cm *cm; int i; @@ -1582,13 +1582,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u16 local_port, local_port_1, last_local_port; struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; + u8 masked_count, current_page = 0; unsigned long cb_priv = 0; LIST_HEAD(bulk_list); char *sbsr_pl; - u8 masked_count; - u8 local_port_1; - u8 local_port; int i; int err; int err2; @@ -1602,6 +1601,10 @@ next_batch: local_port_1 = local_port; masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); + mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) @@ -1609,6 +1612,10 @@ next_batch: for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; + if (local_port > last_local_port) { + current_page++; + goto do_query; + } if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, @@ -1651,10 +1658,11 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u16 local_port, last_local_port; LIST_HEAD(bulk_list); - char *sbsr_pl; unsigned int masked_count; - u8 local_port; + u8 current_page = 0; + char *sbsr_pl; int i; int err; int err2; @@ -1667,6 +1675,10 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); + mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; + for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) @@ -1674,6 
+1686,10 @@ next_batch: for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; + if (local_port > last_local_port) { + current_page++; + goto do_query; + } if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, @@ -1715,7 +1731,7 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index); @@ -1732,7 +1748,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 84d4460f3dcd..20530712eadb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1491,7 +1491,7 @@ static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width, static void mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, bool autoneg) + u16 local_port, u32 proto_admin, bool autoneg) { mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); } @@ -1969,7 +1969,7 @@ static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width, static void mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, + u16 local_port, u32 proto_admin, bool autoneg) { mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 004c42274e48..ce80931f0402 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -317,13 +317,13 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid, } int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, - enum mlxsw_sp_flood_type packet_type, u8 local_port, + enum mlxsw_sp_flood_type packet_type, u16 local_port, bool member) { struct mlxsw_sp_fid_family *fid_family = fid->fid_family; const struct mlxsw_sp_fid_ops *ops = fid_family->ops; const struct mlxsw_sp_flood_table *flood_table; - char *sftr_pl; + char *sftr2_pl; int err; if (WARN_ON(!fid_family->flood_tables || !ops->flood_index)) @@ -333,16 +333,16 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, if (!flood_table) return -ESRCH; - sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); - if (!sftr_pl) + sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL); + if (!sftr2_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index, - ops->flood_index(fid), flood_table->table_type, 1, - local_port, member); - err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr), - sftr_pl); - kfree(sftr_pl); + 
mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index, + ops->flood_index(fid), flood_table->table_type, 1, + local_port, member); + err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2), + sftr2_pl); + kfree(sftr2_pl); return err; } @@ -439,7 +439,7 @@ static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index, } static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index, - u8 local_port, u16 vid, bool valid) + u16 local_port, u16 vid, bool valid) { enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; char svfa_pl[MLXSW_REG_SVFA_LEN]; @@ -573,7 +573,7 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; int err; err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, @@ -601,7 +601,7 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); @@ -784,7 +784,7 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; int err; /* We only need to transition the port to virtual mode since @@ -808,7 +808,7 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index be3791ca6069..bb417db773b9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -203,7 +203,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, */ burst = roundup_pow_of_two(act->police.burst); err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei, - act->police.index, + act->hw_index, act->police.rate_bytes_ps, burst, extack); if (err) @@ -508,7 +508,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); - if (mlxsw_sp_flow_block_is_egress_bound(block)) { + if (mlxsw_sp_flow_block_is_egress_bound(block) && + match.mask->vlan_id) { NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index ad3926de88f2..01cf5a6a26bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -568,37 +568,21 @@ static int mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { - char rips_pl[MLXSW_REG_RIPS_LEN]; struct __ip6_tnl_parm parms6; - int err; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, - MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, - &ipip_entry->dip_kvdl_index); - if (err) - return err; 
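The open-coded KVDL allocation and RIPS write deleted in this hunk move behind mlxsw_sp_ipv6_addr_kvdl_index_get() and mlxsw_sp_ipv6_addr_put(), declared in the spectrum.h hunk above next to the new ipv6_addr_ht and ipv6_addr_ht_lock fields; their spectrum.c implementation is outside this excerpt. A minimal sketch of the semantics they are expected to provide - one KVDL entry and one RIPS register entry per distinct IPv6 address, shared by reference count between the ipip, NVE flood and FDB users. The node structure and rhashtable params name below are illustrative:

struct mlxsw_sp_ipv6_addr_node {
        struct in6_addr key;
        struct rhash_head ht_node;
        u32 kvdl_index;
        refcount_t refcount;
};

int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
                                      const struct in6_addr *addr6,
                                      u32 *p_kvdl_index)
{
        struct mlxsw_sp_ipv6_addr_node *node;
        char rips_pl[MLXSW_REG_RIPS_LEN];
        int err = 0;

        mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
        node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
                                      mlxsw_sp_ipv6_addr_ht_params);
        if (node) {
                /* Address already programmed; just take a reference. */
                refcount_inc(&node->refcount);
                *p_kvdl_index = node->kvdl_index;
                goto out_unlock;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node) {
                err = -ENOMEM;
                goto out_unlock;
        }
        node->key = *addr6;
        refcount_set(&node->refcount, 1);

        err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
                                  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
                                  &node->kvdl_index);
        if (err)
                goto err_kvdl_alloc;

        /* Program the address into the router IPv6 address table. */
        mlxsw_reg_rips_pack(rips_pl, node->kvdl_index, addr6);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
        if (err)
                goto err_rips_write;

        err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
                                     mlxsw_sp_ipv6_addr_ht_params);
        if (err)
                goto err_rips_write;

        *p_kvdl_index = node->kvdl_index;
        goto out_unlock;

err_rips_write:
        mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
                           node->kvdl_index);
err_kvdl_alloc:
        kfree(node);
out_unlock:
        mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
        return err;
}

void mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp,
                            const struct in6_addr *addr6)
{
        struct mlxsw_sp_ipv6_addr_node *node;

        mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
        node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
                                      mlxsw_sp_ipv6_addr_ht_params);
        if (WARN_ON(!node))
                goto out_unlock;
        if (!refcount_dec_and_test(&node->refcount))
                goto out_unlock;
        rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
                               mlxsw_sp_ipv6_addr_ht_params);
        mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
                           node->kvdl_index);
        kfree(node);
out_unlock:
        mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}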
parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); - mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index, - &parms6.raddr); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); - if (err) - goto err_rips_write; - - return 0; - -err_rips_write: - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, - ipip_entry->dip_kvdl_index); - return err; + return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr, + &ipip_entry->dip_kvdl_index); } static void mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_entry *ipip_entry) { - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, - ipip_entry->dip_kvdl_index); + struct __ip6_tnl_parm parms6; + + parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr); } static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index 9eba8fa684ae..d2b57a045aa4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -130,15 +130,25 @@ mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record, struct mlxsw_sp_nve_mc_entry *mc_entry, const union mlxsw_sp_l3addr *addr) { - WARN_ON(1); + u32 kvdl_index; + int err; + + err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp, + &addr->addr6, &kvdl_index); + if (err) + return err; - return -EINVAL; + mc_entry->ipv6_entry.addr6 = addr->addr6; + mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index; + return 0; } static void mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record, const struct mlxsw_sp_nve_mc_entry *mc_entry) { + mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp, + &mc_entry->ipv6_entry.addr6); } static void @@ -787,6 +797,142 @@ static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp, ops->fdb_clear_offload(nve_dev, vni); } +struct mlxsw_sp_nve_ipv6_ht_key { + u8 mac[ETH_ALEN]; + u16 fid_index; +}; + +struct mlxsw_sp_nve_ipv6_ht_node { + struct rhash_head ht_node; + struct list_head list; + struct mlxsw_sp_nve_ipv6_ht_key key; + struct in6_addr addr6; +}; + +static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = { + .key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key), + .key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key), + .head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node), +}; + +int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6, + u32 *p_kvdl_index) +{ + return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index); +} + +void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp, + const struct in6_addr *addr6) +{ + mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6); +} + +static struct mlxsw_sp_nve_ipv6_ht_node * +mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index) +{ + struct mlxsw_sp_nve_ipv6_ht_key key = {}; + + ether_addr_copy(key.mac, mac); + key.fid_index = fid_index; + return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key, + mlxsw_sp_nve_ipv6_ht_params); +} + +static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid_index, + const struct in6_addr *addr6) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node; + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + int err; + + ipv6_ht_node = kzalloc(sizeof(*ipv6_ht_node), GFP_KERNEL); + if 
(!ipv6_ht_node) + return -ENOMEM; + + ether_addr_copy(ipv6_ht_node->key.mac, mac); + ipv6_ht_node->key.fid_index = fid_index; + ipv6_ht_node->addr6 = *addr6; + + err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node, + mlxsw_sp_nve_ipv6_ht_params); + if (err) + goto err_rhashtable_insert; + + list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list); + + return 0; + +err_rhashtable_insert: + kfree(ipv6_ht_node); + return err; +} + +static void +mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node) +{ + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + + list_del(&ipv6_ht_node->list); + rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node, + mlxsw_sp_nve_ipv6_ht_params); + kfree(ipv6_ht_node); +} + +int +mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index, + const struct in6_addr *new_addr6) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node; + + ASSERT_RTNL(); + + ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac, + fid_index); + if (!ipv6_ht_node) + return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index, + new_addr6); + + mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6); + ipv6_ht_node->addr6 = *new_addr6; + return 0; +} + +void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid_index) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node; + + ASSERT_RTNL(); + + ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac, + fid_index); + if (WARN_ON(!ipv6_ht_node)) + return; + + mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node); +} + +static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp, + u16 fid_index) +{ + struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node, *tmp; + struct mlxsw_sp_nve *nve = mlxsw_sp->nve; + + list_for_each_entry_safe(ipv6_ht_node, tmp, &nve->ipv6_addr_list, + list) { + if (ipv6_ht_node->key.fid_index != fid_index) + continue; + + mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6); + mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node); + } +} + int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack) @@ -845,6 +991,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid); mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index); + mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index); if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) || mlxsw_sp_fid_vni(fid, &vni))) @@ -981,7 +1128,13 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp) err = rhashtable_init(&nve->mc_list_ht, &mlxsw_sp_nve_mc_list_ht_params); if (err) - goto err_rhashtable_init; + goto err_mc_rhashtable_init; + + err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params); + if (err) + goto err_ipv6_rhashtable_init; + + INIT_LIST_HEAD(&nve->ipv6_addr_list); err = mlxsw_sp_nve_qos_init(mlxsw_sp); if (err) @@ -1000,8 +1153,10 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp) err_nve_resources_query: err_nve_ecn_init: err_nve_qos_init: + rhashtable_destroy(&nve->ipv6_ht); +err_ipv6_rhashtable_init: rhashtable_destroy(&nve->mc_list_ht); -err_rhashtable_init: +err_mc_rhashtable_init: mlxsw_sp->nve = NULL; kfree(nve); return err; @@ -1010,6 +1165,8 @@ err_rhashtable_init: void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp) { WARN_ON(mlxsw_sp->nve->num_nve_tunnels); + WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list)); + rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht); 
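The {MAC, FID} to IPv6 address map above exists because the FDB record references the underlay address only indirectly, via its KVDL index: when a unicast tunnel FDB entry is later replaced or deleted, the caller needs to know which address to drop a reference on. A hypothetical caller flow under RTNL (the real caller is the switchdev FDB code, outside this excerpt; the function name and the elided SFD write are illustrative):

static int example_fdb_tunnel_uc_ipv6_set(struct mlxsw_sp *mlxsw_sp,
                                          const char *mac, u16 fid_index,
                                          const struct in6_addr *addr6)
{
        u32 kvdl_index;
        int err;

        /* Take a reference on (or program) the KVDL entry holding the
         * IPv6 underlay address; the FDB record points at it by index.
         */
        err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr6, &kvdl_index);
        if (err)
                return err;

        /* ... write the SFD record referencing kvdl_index here ... */

        /* Remember which address {MAC, FID} maps to so that a later
         * replace or delete can put the reference on the right address.
         */
        err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid_index,
                                                 addr6);
        if (err)
                mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr6);
        return err;
}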
rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht); kfree(mlxsw_sp->nve); mlxsw_sp->nve = NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 98d1fdc25eac..0d21de1d0395 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -23,6 +23,8 @@ struct mlxsw_sp_nve_config { struct mlxsw_sp_nve { struct mlxsw_sp_nve_config config; struct rhashtable mc_list_ht; + struct rhashtable ipv6_ht; + struct list_head ipv6_addr_list; /* Saves hash table nodes. */ struct mlxsw_sp *mlxsw_sp; const struct mlxsw_sp_nve_ops **nve_ops_arr; unsigned int num_nve_tunnels; /* Protected by RTNL */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index d018d2da5949..d309b77a0194 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -10,8 +10,48 @@ #include "spectrum.h" #include "spectrum_nve.h" -#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ +#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_LEARN) +#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \ + VXLAN_F_UDP_ZERO_CSUM6_TX | \ + VXLAN_F_UDP_ZERO_CSUM6_RX) + +static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg, + struct netlink_ext_ack *extack) +{ + if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX"); + return false; + } + + if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); + return false; + } + + return true; +} + +static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg, + struct netlink_ext_ack *extack) +{ + if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX"); + return false; + } + + if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX"); + return false; + } + + if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); + return false; + } + + return true; +} static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_params *params, @@ -20,11 +60,6 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, struct vxlan_dev *vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; - if (cfg->saddr.sa.sa_family != AF_INET) { - NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported"); - return false; - } - if (vxlan_addr_multicast(&cfg->remote_ip)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported"); return false; @@ -55,14 +90,15 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, return false; } - if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) { - NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported"); - return false; - } - - if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) { - NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag"); - return false; + switch (cfg->saddr.sa.sa_family) { + case AF_INET: + if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack)) + return false; + break; + case AF_INET6: + if 
(!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack)) + return false; + break; } if (cfg->ttl == 0) { @@ -90,6 +126,22 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack); } +static void +mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg, + struct mlxsw_sp_nve_config *config) +{ + switch (cfg->saddr.sa.sa_family) { + case AF_INET: + config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; + config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; + break; + case AF_INET6: + config->ul_proto = MLXSW_SP_L3_PROTO_IPV6; + config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr; + break; + } +} + static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_params *params, struct mlxsw_sp_nve_config *config) @@ -102,8 +154,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->flowlabel = cfg->label; config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0; config->ul_tb_id = RT_TABLE_MAIN; - config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; - config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; + mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config); config->udp_dport = cfg->dst_port; } @@ -111,6 +162,7 @@ static void mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, const struct mlxsw_sp_nve_config *config) { + struct in6_addr addr6; u8 udp_sport; mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, @@ -122,7 +174,18 @@ mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, get_random_bytes(&udp_sport, sizeof(udp_sport)); udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80; mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport); - mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4)); + + switch (config->ul_proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_tngcr_usipv4_set(tngcr_pl, + be32_to_cpu(config->ul_sip.addr4)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + addr6 = config->ul_sip.addr6; + mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl, + (const char *)&addr6); + break; + } } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index 1a180384e7e8..0ff163fbc775 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -36,7 +36,7 @@ struct mlxsw_sp_ptp_state { }; struct mlxsw_sp1_ptp_key { - u8 local_port; + u16 local_port; u8 message_type; u16 sequence_id; u8 domain_number; @@ -406,7 +406,7 @@ mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp, * This case is similar to 2) above. 
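Taken together, the VXLAN checks above mean an IPv6-underlay VXLAN device is considered offloadable only when zero UDP checksums are allowed on both TX and RX, the remote address is unicast, the TTL is non-zero and learning is disabled (VXLAN_F_LEARN is absent from the IPv6 supported-flags mask, unlike the IPv4 one). With iproute2 that corresponds to something like "ip link add vxlan10 type vxlan id 10 local 2001:db8::1 dstport 4789 ttl 20 nolearning udp6zerocsumtx udp6zerocsumrx" (device name, VNI and addresses illustrative).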
*/ static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port, + struct sk_buff *skb, u16 local_port, bool ingress, struct skb_shared_hwtstamps *hwtstamps) { @@ -524,7 +524,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp, } static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port, + struct sk_buff *skb, u16 local_port, bool ingress) { struct mlxsw_sp_port *mlxsw_sp_port; @@ -564,7 +564,7 @@ immediate: } void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, - u8 local_port, u8 message_type, + u16 local_port, u8 message_type, u8 domain_number, u16 sequence_id, u64 timestamp) { @@ -599,14 +599,14 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, } void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port) + u16 local_port) { skb_reset_mac_header(skb); mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true); } void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index 1d43a3755285..c06cd1384bca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -31,13 +31,13 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state); void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); + u16 local_port); void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port); + struct sk_buff *skb, u16 local_port); void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, - u8 local_port, u8 message_type, + u16 local_port, u8 message_type, u8 domain_number, u16 sequence_id, u64 timestamp); @@ -80,20 +80,20 @@ static inline void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state) } static inline void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp); } static inline void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { dev_kfree_skb_any(skb); } static inline void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, - u8 local_port, u8 message_type, + u16 local_port, u8 message_type, u8 domain_number, u16 sequence_id, u64 timestamp) { @@ -159,13 +159,13 @@ static inline void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state) } static inline void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp); } static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, - struct sk_buff *skb, u8 local_port) + struct sk_buff *skb, u16 local_port) { dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 217e3b351dfe..d40762cfc453 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1307,6 
+1307,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id, addr_prefix_len = 32; break; case MLXSW_SP_L3_PROTO_IPV6: + addrp = &addr->addr6; + addr_len = 16; + addr_prefix_len = 128; + break; default: WARN_ON(1); return NULL; @@ -7002,6 +7006,8 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi; union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr }; + u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id); + struct mlxsw_sp_router *router = mlxsw_sp->router; int ifindex = nhgi->nexthops[0].ifindex; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -7015,6 +7021,14 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry, ipip_entry); } + if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id, + MLXSW_SP_L3_PROTO_IPV6, &dip)) { + u32 tunnel_index; + + tunnel_index = router->nve_decap_config.tunnel_index; + fib_entry->decap.tunnel_index = tunnel_index; + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP; + } return 0; } @@ -8369,9 +8383,6 @@ mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac) int id; idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) { - if (!profile) - continue; - if (ether_addr_equal_masked(profile->mac_prefix, mac, mlxsw_sp->mac_mask)) return profile; @@ -8494,7 +8505,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp, u8 mac_profile; int err; - if (!mlxsw_sp_rif_mac_profile_is_shared(rif)) + if (!mlxsw_sp_rif_mac_profile_is_shared(rif) && + !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac)) return mlxsw_sp_rif_mac_profile_edit(rif, new_mac); err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac, @@ -9343,7 +9355,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); } -u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) +u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) { return mlxsw_core_max_ports(mlxsw_sp->core) + 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index f5f819aa9a65..f9671cc53002 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -37,7 +37,7 @@ struct mlxsw_sp_span { struct mlxsw_sp_span_analyzed_port { struct list_head list; /* Member of analyzed_ports_list */ refcount_t ref_count; - u8 local_port; + u16 local_port; bool ingress; }; @@ -46,7 +46,7 @@ struct mlxsw_sp_span_trigger_entry { struct mlxsw_sp_span *span; const struct mlxsw_sp_span_trigger_ops *ops; refcount_t ref_count; - u8 local_port; + u16 local_port; enum mlxsw_sp_span_trigger trigger; struct mlxsw_sp_span_trigger_parms parms; }; @@ -179,7 +179,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -199,7 +199,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -480,7 
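The ip2me lookup above now dispatches on the underlay protocol: a local host route is keyed by a 4-byte address with a /32 prefix for IPv4, and by a 16-byte address with a /128 prefix for IPv6. A hypothetical standalone helper capturing just that mapping (the enum and function names are not the driver's):

	#include <stddef.h>

	enum l3_proto { L3_PROTO_IPV4, L3_PROTO_IPV6 };

	/* Host-route key geometry per protocol: a full-length prefix
	 * over the full address width.
	 */
	static int l3_host_route_key(enum l3_proto proto, size_t *addr_len,
				     unsigned int *prefix_len)
	{
		switch (proto) {
		case L3_PROTO_IPV4:
			*addr_len = 4;		/* bytes */
			*prefix_len = 32;	/* bits */
			return 0;
		case L3_PROTO_IPV6:
			*addr_len = 16;
			*prefix_len = 128;
			return 0;
		}

		return -1;	/* unreachable for a valid enum value */
	}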
+480,7 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -584,7 +584,7 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -650,7 +650,7 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry, { struct mlxsw_sp_port *dest_port = sparms.dest_port; struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; - u8 local_port = dest_port->local_port; + u16 local_port = dest_port->local_port; char mpat_pl[MLXSW_REG_MPAT_LEN]; int pa_id = span_entry->id; @@ -997,7 +997,7 @@ static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_por } static struct mlxsw_sp_span_analyzed_port * -mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port, +mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port, bool ingress) { struct mlxsw_sp_span_analyzed_port *analyzed_port; @@ -1165,7 +1165,7 @@ int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_span_analyzed_port *analyzed_port; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; int err = 0; mutex_lock(&mlxsw_sp->span->analyzed_ports_lock); @@ -1193,7 +1193,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_span_analyzed_port *analyzed_port; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; mutex_lock(&mlxsw_sp->span->analyzed_ports_lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 81c7e8a7fcf5..65c1724c63b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -865,17 +865,17 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, bool add) { - char *smid_pl; + char *smid2_pl; int err; - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) + smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL); + if (!smid2_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid_idx, - mlxsw_sp_router_port(mlxsw_sp), add); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); + mlxsw_reg_smid2_pack(smid2_pl, mid_idx, + mlxsw_sp_router_port(mlxsw_sp), add); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl); + kfree(smid2_pl); return err; } @@ -980,7 +980,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_bridge_device *bridge_device; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u16 vid = mlxsw_sp_port_vlan->vid; struct mlxsw_sp_fid *fid; int err; @@ -1029,7 +1029,7 @@ 
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u16 vid = mlxsw_sp_port_vlan->vid; mlxsw_sp_port_vlan->fid = NULL; @@ -1290,38 +1290,52 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, - const char *mac, u16 fid, - enum mlxsw_sp_l3proto proto, - const union mlxsw_sp_l3addr *addr, - bool adding, bool dynamic) +static int +mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic, + const char *mac, u16 fid, __be32 addr, bool adding) { - enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto; char *sfd_pl; u8 num_rec; u32 uip; int err; - switch (proto) { - case MLXSW_SP_L3_PROTO_IPV4: - uip = be32_to_cpu(addr->addr4); - sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4; - break; - case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON(1); - return -EOPNOTSUPP; - } + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + uip = be32_to_cpu(addr); + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0, + mlxsw_sp_sfd_rec_policy(dynamic), mac, + fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: + kfree(sfd_pl); + return err; +} + +static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + u32 kvdl_index, bool adding) +{ + char *sfd_pl; + u8 num_rec; + int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); if (!sfd_pl) return -ENOMEM; mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); - mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0, - mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, - MLXSW_REG_SFD_REC_ACTION_NOP, uip, - sfd_proto); + mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index); num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1335,7 +1349,80 @@ out: return err; } -static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + const struct in6_addr *addr) +{ + u32 kvdl_index; + int err; + + err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index); + if (err) + return err; + + err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, + kvdl_index, true); + if (err) + goto err_sfd_write; + + err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr); + if (err) + /* Replace can fail only for creating new mapping, so removing + * the FDB entry in the error path is OK. 
+ */ + goto err_addr_replace; + + return 0; + +err_addr_replace: + mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index, + false); +err_sfd_write: + mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr); + return err; +} + +static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + const struct in6_addr *addr) +{ + mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid); + mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false); + mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr); +} + +static int +mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid, const struct in6_addr *addr, bool adding) +{ + if (adding) + return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid, + addr); + + mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr); + return 0; +} + +static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + enum mlxsw_sp_l3proto proto, + const union mlxsw_sp_l3addr *addr, + bool adding, bool dynamic) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid, + addr->addr4, adding); + case MLXSW_SP_L3_PROTO_IPV6: + return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid, + &addr->addr6, adding); + default: + WARN_ON(1); + return -EOPNOTSUPP; + } +} + +static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port, const char *mac, u16 fid, bool adding, enum mlxsw_reg_sfd_rec_action action, enum mlxsw_reg_sfd_rec_policy policy) @@ -1363,7 +1450,7 @@ out: return err; } -static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port, const char *mac, u16 fid, bool adding, bool dynamic) { @@ -1477,30 +1564,30 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, long *ports_bitmap, bool set_router_port) { - char *smid_pl; + char *smid2_pl; int err, i; - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) + smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL); + if (!smid2_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); + mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false); for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { if (mlxsw_sp->ports[i]) - mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1); } - mlxsw_reg_smid_port_mask_set(smid_pl, - mlxsw_sp_router_port(mlxsw_sp), 1); + mlxsw_reg_smid2_port_mask_set(smid2_pl, + mlxsw_sp_router_port(mlxsw_sp), 1); for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) - mlxsw_reg_smid_port_set(smid_pl, i, 1); + mlxsw_reg_smid2_port_set(smid2_pl, i, 1); - mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp), - set_router_port); + mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp), + set_router_port); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl); + kfree(smid2_pl); return err; } @@ -1508,16 +1595,16 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid_idx, bool add) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *smid_pl; + char *smid2_pl; int err; - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) + smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL); + if (!smid2_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid_idx, 
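mlxsw_sp_port_fdb_tun_uc_op6_add() above is a textbook instance of the kernel's goto-unwind error handling: resources are acquired in order (KVDL entry, SFD record, address map) and, on failure, released in exactly the reverse order, each label naming the step that failed. A stripped-down sketch of the shape; the resource_* stubs are placeholders, not mlxsw APIs:

	/* Acquire A, then B, then C; undo in reverse order on failure. */
	static int resource_a_get(void) { return 0; }
	static void resource_a_put(void) { }
	static int resource_b_get(void) { return 0; }
	static void resource_b_put(void) { }
	static int resource_c_get(void) { return 0; }

	static int three_step_setup(void)
	{
		int err;

		err = resource_a_get();
		if (err)
			return err;

		err = resource_b_get();
		if (err)
			goto err_b;

		err = resource_c_get();
		if (err)
			goto err_c;

		return 0;

	err_c:
		resource_b_put();
	err_b:
		resource_a_put();
		return err;
	}

Because each label sits just below the release of the next-later resource, the code reached by a jump releases exactly what had been acquired when the failure occurred.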
mlxsw_sp_port->local_port, add); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); + mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl); + kfree(smid2_pl); return err; } @@ -2536,7 +2623,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port *mlxsw_sp_port; enum switchdev_notifier_type type; char mac[ETH_ALEN]; - u8 local_port; + u16 local_port; u16 vid, fid; bool do_notification = true; int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 26d01adbedad..47b061b99160 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -60,7 +60,7 @@ enum { }; static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port, + u16 local_port, struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_port_pcpu_stats *pcpu_stats; @@ -85,7 +85,7 @@ static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, return 0; } -static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct devlink_port *in_devlink_port; @@ -109,7 +109,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port, consume_skb(skb); } -static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index; @@ -138,7 +138,7 @@ static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port, consume_skb(skb); } -static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, +static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct devlink_port *in_devlink_port; @@ -164,7 +164,7 @@ static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, return 0; } -static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { int err; @@ -176,14 +176,14 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port, netif_receive_skb(skb); } -static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { skb->offload_fwd_mark = 1; mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); } -static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { skb->offload_l3_fwd_mark = 1; @@ -191,7 +191,7 @@ static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port, mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx); } -static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); @@ -212,7 +212,7 @@ static struct mlxsw_sp_port * mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_rx_md_info *rx_md_info) { - u8 local_port; + u16 local_port; if (!rx_md_info->tx_port_valid) 
return NULL; @@ -257,7 +257,7 @@ static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp, md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT; } -static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); @@ -293,7 +293,7 @@ out: consume_skb(skb); } -static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info; @@ -343,7 +343,7 @@ out: consume_skb(skb); } -static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port, +static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u16 local_port, void *trap_ctx) { struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx); diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c index 2e25798c610e..7f49042484bd 100644 --- a/drivers/net/ethernet/micrel/ks8851_par.c +++ b/drivers/net/ethernet/micrel/ks8851_par.c @@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev) return ret; netdev->irq = platform_get_irq(pdev, 0); + if (netdev->irq < 0) + return netdev->irq; return ks8851_probe_common(netdev, dev, msg_enable); } diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 99c0c1491af2..d024983815da 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6317,11 +6317,15 @@ static int netdev_set_pauseparam(struct net_device *dev, * netdev_get_ringparam - get tx/rx ring parameters * @dev: Network device. * @ring: Ethtool RING settings data structure. + * @kernel_ring: Ethtool external RING settings data structure. + * @extack: Netlink handle. * * This procedure returns the TX/RX ring settings. */ static void netdev_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 735eea1dacf1..ed7a35c3ceac 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -55,6 +55,7 @@ config LAN743X To compile this driver as a module, choose M here. The module will be called lan743x. 
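The ks8851_par change above adds the standard check for platform_get_irq(), which returns the IRQ number on success and a negative errno (such as -EPROBE_DEFER or -ENXIO) on failure; the raw value must not be stored into netdev->irq or passed on unchecked. A condensed sketch of the pattern, with example_probe() and example_setup() as illustrative names:

	#include <linux/platform_device.h>

	static int example_setup(struct platform_device *pdev, int irq)
	{
		return 0;	/* placeholder for the rest of the probe */
	}

	static int example_probe(struct platform_device *pdev)
	{
		int irq;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */

		/* Only now is irq known to be a valid IRQ number. */
		return example_setup(pdev, irq);
	}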
+source "drivers/net/ethernet/microchip/lan966x/Kconfig" source "drivers/net/ethernet/microchip/sparx5/Kconfig" endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index c77dc0379bfd..9faa41436198 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_LAN743X) += lan743x.o lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o +obj-$(CONFIG_LAN966X_SWITCH) += lan966x/ obj-$(CONFIG_SPARX5_SWITCH) += sparx5/ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 4fc97823bc84..8c6390d95158 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) } static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, - u8 duplex, u16 local_adv, - u16 remote_adv) + u16 local_adv, u16 remote_adv) { struct lan743x_phy *phy = &adapter->phy; u8 cap; @@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) phy_print_status(phydev); if (phydev->state == PHY_RUNNING) { - struct ethtool_link_ksettings ksettings; int remote_advertisement = 0; int local_advertisement = 0; @@ -980,18 +978,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) } lan743x_csr_write(adapter, MAC_CR, data); - memset(&ksettings, 0, sizeof(ksettings)); - phy_ethtool_get_link_ksettings(netdev, &ksettings); local_advertisement = linkmode_adv_to_mii_adv_t(phydev->advertising); remote_advertisement = linkmode_adv_to_mii_adv_t(phydev->lp_advertising); - lan743x_phy_update_flowcontrol(adapter, - ksettings.base.duplex, - local_advertisement, + lan743x_phy_update_flowcontrol(adapter, local_advertisement, remote_advertisement); - lan743x_ptp_update_latency(adapter, ksettings.base.speed); + lan743x_ptp_update_latency(adapter, phydev->speed); } } @@ -1745,13 +1739,10 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx) } if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, DMA_BIT_MASK(64))) { - if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, - DMA_BIT_MASK(32))) { - dev_warn(&tx->adapter->pdev->dev, - "lan743x_: No suitable DMA available\n"); - ret = -ENOMEM; - goto cleanup; - } + dev_warn(&tx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; } ring_allocation_size = ALIGN(tx->ring_size * sizeof(struct lan743x_tx_descriptor), @@ -2290,13 +2281,10 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx) } if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, DMA_BIT_MASK(64))) { - if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, - DMA_BIT_MASK(32))) { - dev_warn(&rx->adapter->pdev->dev, - "lan743x_: No suitable DMA available\n"); - ret = -ENOMEM; - goto cleanup; - } + dev_warn(&rx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; } ring_allocation_size = ALIGN(rx->ring_size * sizeof(struct lan743x_rx_descriptor), diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 9380e396f648..8b7a8d879083 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1305,12 +1305,6 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return 
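The two lan743x ring-init hunks above drop the 32-bit fallback: the driver now asks dma_set_mask_and_coherent() once for a 64-bit mask and treats failure as fatal instead of retrying with DMA_BIT_MASK(32). A condensed sketch of the resulting pattern; example_dma_init() is an illustrative name:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int example_dma_init(struct device *dev)
	{
		/* Set the streaming and coherent DMA masks together. */
		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
			dev_warn(dev, "No suitable DMA available\n");
			return -ENOMEM;
		}

		return 0;
	}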
-EFAULT; - if (config.flags) { - netif_warn(adapter, drv, adapter->netdev, - "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n", - config.flags); - } - switch (config.tx_type) { case HWTSTAMP_TX_OFF: for (index = 0; index < LAN743X_MAX_TX_CHANNELS; diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig new file mode 100644 index 000000000000..ac273f84b69e --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/Kconfig @@ -0,0 +1,9 @@ +config LAN966X_SWITCH + tristate "Lan966x switch driver" + depends on HAS_IOMEM + depends on OF + depends on NET_SWITCHDEV + select PHYLINK + select PACKING + help + This driver supports the Lan966x network switch device. diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile new file mode 100644 index 000000000000..040cfff9f577 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Microchip Lan966x network device drivers. +# + +obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o + +lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \ + lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \ + lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c new file mode 100644 index 000000000000..614f12c2fe6a --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c @@ -0,0 +1,682 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/netdevice.h> + +#include "lan966x_main.h" + +/* Number of traffic classes */ +#define LAN966X_NUM_TC 8 +#define LAN966X_STATS_CHECK_DELAY (2 * HZ) + +static const struct lan966x_stat_layout lan966x_stats_layout[] = { + { .name = "rx_octets", .offset = 0x00, }, + { .name = "rx_unicast", .offset = 0x01, }, + { .name = "rx_multicast", .offset = 0x02 }, + { .name = "rx_broadcast", .offset = 0x03 }, + { .name = "rx_short", .offset = 0x04 }, + { .name = "rx_frag", .offset = 0x05 }, + { .name = "rx_jabber", .offset = 0x06 }, + { .name = "rx_crc", .offset = 0x07 }, + { .name = "rx_symbol_err", .offset = 0x08 }, + { .name = "rx_sz_64", .offset = 0x09 }, + { .name = "rx_sz_65_127", .offset = 0x0a}, + { .name = "rx_sz_128_255", .offset = 0x0b}, + { .name = "rx_sz_256_511", .offset = 0x0c }, + { .name = "rx_sz_512_1023", .offset = 0x0d }, + { .name = "rx_sz_1024_1526", .offset = 0x0e }, + { .name = "rx_sz_jumbo", .offset = 0x0f }, + { .name = "rx_pause", .offset = 0x10 }, + { .name = "rx_control", .offset = 0x11 }, + { .name = "rx_long", .offset = 0x12 }, + { .name = "rx_cat_drop", .offset = 0x13 }, + { .name = "rx_red_prio_0", .offset = 0x14 }, + { .name = "rx_red_prio_1", .offset = 0x15 }, + { .name = "rx_red_prio_2", .offset = 0x16 }, + { .name = "rx_red_prio_3", .offset = 0x17 }, + { .name = "rx_red_prio_4", .offset = 0x18 }, + { .name = "rx_red_prio_5", .offset = 0x19 }, + { .name = "rx_red_prio_6", .offset = 0x1a }, + { .name = "rx_red_prio_7", .offset = 0x1b }, + { .name = "rx_yellow_prio_0", .offset = 0x1c }, + { .name = "rx_yellow_prio_1", .offset = 0x1d }, + { .name = "rx_yellow_prio_2", .offset = 0x1e }, + { .name = "rx_yellow_prio_3", .offset = 0x1f }, + { .name = "rx_yellow_prio_4", .offset = 0x20 }, + { .name = "rx_yellow_prio_5", .offset = 0x21 }, + { .name = "rx_yellow_prio_6", .offset = 0x22 }, + { .name = "rx_yellow_prio_7", .offset = 0x23 }, + { .name = 
"rx_green_prio_0", .offset = 0x24 }, + { .name = "rx_green_prio_1", .offset = 0x25 }, + { .name = "rx_green_prio_2", .offset = 0x26 }, + { .name = "rx_green_prio_3", .offset = 0x27 }, + { .name = "rx_green_prio_4", .offset = 0x28 }, + { .name = "rx_green_prio_5", .offset = 0x29 }, + { .name = "rx_green_prio_6", .offset = 0x2a }, + { .name = "rx_green_prio_7", .offset = 0x2b }, + { .name = "rx_assembly_err", .offset = 0x2c }, + { .name = "rx_smd_err", .offset = 0x2d }, + { .name = "rx_assembly_ok", .offset = 0x2e }, + { .name = "rx_merge_frag", .offset = 0x2f }, + { .name = "rx_pmac_octets", .offset = 0x30, }, + { .name = "rx_pmac_unicast", .offset = 0x31, }, + { .name = "rx_pmac_multicast", .offset = 0x32 }, + { .name = "rx_pmac_broadcast", .offset = 0x33 }, + { .name = "rx_pmac_short", .offset = 0x34 }, + { .name = "rx_pmac_frag", .offset = 0x35 }, + { .name = "rx_pmac_jabber", .offset = 0x36 }, + { .name = "rx_pmac_crc", .offset = 0x37 }, + { .name = "rx_pmac_symbol_err", .offset = 0x38 }, + { .name = "rx_pmac_sz_64", .offset = 0x39 }, + { .name = "rx_pmac_sz_65_127", .offset = 0x3a }, + { .name = "rx_pmac_sz_128_255", .offset = 0x3b }, + { .name = "rx_pmac_sz_256_511", .offset = 0x3c }, + { .name = "rx_pmac_sz_512_1023", .offset = 0x3d }, + { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e }, + { .name = "rx_pmac_sz_jumbo", .offset = 0x3f }, + { .name = "rx_pmac_pause", .offset = 0x40 }, + { .name = "rx_pmac_control", .offset = 0x41 }, + { .name = "rx_pmac_long", .offset = 0x42 }, + + { .name = "tx_octets", .offset = 0x80, }, + { .name = "tx_unicast", .offset = 0x81, }, + { .name = "tx_multicast", .offset = 0x82 }, + { .name = "tx_broadcast", .offset = 0x83 }, + { .name = "tx_col", .offset = 0x84 }, + { .name = "tx_drop", .offset = 0x85 }, + { .name = "tx_pause", .offset = 0x86 }, + { .name = "tx_sz_64", .offset = 0x87 }, + { .name = "tx_sz_65_127", .offset = 0x88 }, + { .name = "tx_sz_128_255", .offset = 0x89 }, + { .name = "tx_sz_256_511", .offset = 0x8a }, + { .name = "tx_sz_512_1023", .offset = 0x8b }, + { .name = "tx_sz_1024_1526", .offset = 0x8c }, + { .name = "tx_sz_jumbo", .offset = 0x8d }, + { .name = "tx_yellow_prio_0", .offset = 0x8e }, + { .name = "tx_yellow_prio_1", .offset = 0x8f }, + { .name = "tx_yellow_prio_2", .offset = 0x90 }, + { .name = "tx_yellow_prio_3", .offset = 0x91 }, + { .name = "tx_yellow_prio_4", .offset = 0x92 }, + { .name = "tx_yellow_prio_5", .offset = 0x93 }, + { .name = "tx_yellow_prio_6", .offset = 0x94 }, + { .name = "tx_yellow_prio_7", .offset = 0x95 }, + { .name = "tx_green_prio_0", .offset = 0x96 }, + { .name = "tx_green_prio_1", .offset = 0x97 }, + { .name = "tx_green_prio_2", .offset = 0x98 }, + { .name = "tx_green_prio_3", .offset = 0x99 }, + { .name = "tx_green_prio_4", .offset = 0x9a }, + { .name = "tx_green_prio_5", .offset = 0x9b }, + { .name = "tx_green_prio_6", .offset = 0x9c }, + { .name = "tx_green_prio_7", .offset = 0x9d }, + { .name = "tx_aged", .offset = 0x9e }, + { .name = "tx_llct", .offset = 0x9f }, + { .name = "tx_ct", .offset = 0xa0 }, + { .name = "tx_mm_hold", .offset = 0xa1 }, + { .name = "tx_merge_frag", .offset = 0xa2 }, + { .name = "tx_pmac_octets", .offset = 0xa3, }, + { .name = "tx_pmac_unicast", .offset = 0xa4, }, + { .name = "tx_pmac_multicast", .offset = 0xa5 }, + { .name = "tx_pmac_broadcast", .offset = 0xa6 }, + { .name = "tx_pmac_pause", .offset = 0xa7 }, + { .name = "tx_pmac_sz_64", .offset = 0xa8 }, + { .name = "tx_pmac_sz_65_127", .offset = 0xa9 }, + { .name = "tx_pmac_sz_128_255", .offset = 0xaa }, + { .name = 
"tx_pmac_sz_256_511", .offset = 0xab }, + { .name = "tx_pmac_sz_512_1023", .offset = 0xac }, + { .name = "tx_pmac_sz_1024_1526", .offset = 0xad }, + { .name = "tx_pmac_sz_jumbo", .offset = 0xae }, + + { .name = "dr_local", .offset = 0x100 }, + { .name = "dr_tail", .offset = 0x101 }, + { .name = "dr_yellow_prio_0", .offset = 0x102 }, + { .name = "dr_yellow_prio_1", .offset = 0x103 }, + { .name = "dr_yellow_prio_2", .offset = 0x104 }, + { .name = "dr_yellow_prio_3", .offset = 0x105 }, + { .name = "dr_yellow_prio_4", .offset = 0x106 }, + { .name = "dr_yellow_prio_5", .offset = 0x107 }, + { .name = "dr_yellow_prio_6", .offset = 0x108 }, + { .name = "dr_yellow_prio_7", .offset = 0x109 }, + { .name = "dr_green_prio_0", .offset = 0x10a }, + { .name = "dr_green_prio_1", .offset = 0x10b }, + { .name = "dr_green_prio_2", .offset = 0x10c }, + { .name = "dr_green_prio_3", .offset = 0x10d }, + { .name = "dr_green_prio_4", .offset = 0x10e }, + { .name = "dr_green_prio_5", .offset = 0x10f }, + { .name = "dr_green_prio_6", .offset = 0x110 }, + { .name = "dr_green_prio_7", .offset = 0x111 }, +}; + +/* The following numbers are indexes into lan966x_stats_layout[] */ +#define SYS_COUNT_RX_OCT 0 +#define SYS_COUNT_RX_UC 1 +#define SYS_COUNT_RX_MC 2 +#define SYS_COUNT_RX_BC 3 +#define SYS_COUNT_RX_SHORT 4 +#define SYS_COUNT_RX_FRAG 5 +#define SYS_COUNT_RX_JABBER 6 +#define SYS_COUNT_RX_CRC 7 +#define SYS_COUNT_RX_SYMBOL_ERR 8 +#define SYS_COUNT_RX_SZ_64 9 +#define SYS_COUNT_RX_SZ_65_127 10 +#define SYS_COUNT_RX_SZ_128_255 11 +#define SYS_COUNT_RX_SZ_256_511 12 +#define SYS_COUNT_RX_SZ_512_1023 13 +#define SYS_COUNT_RX_SZ_1024_1526 14 +#define SYS_COUNT_RX_SZ_JUMBO 15 +#define SYS_COUNT_RX_PAUSE 16 +#define SYS_COUNT_RX_CONTROL 17 +#define SYS_COUNT_RX_LONG 18 +#define SYS_COUNT_RX_CAT_DROP 19 +#define SYS_COUNT_RX_RED_PRIO_0 20 +#define SYS_COUNT_RX_RED_PRIO_1 21 +#define SYS_COUNT_RX_RED_PRIO_2 22 +#define SYS_COUNT_RX_RED_PRIO_3 23 +#define SYS_COUNT_RX_RED_PRIO_4 24 +#define SYS_COUNT_RX_RED_PRIO_5 25 +#define SYS_COUNT_RX_RED_PRIO_6 26 +#define SYS_COUNT_RX_RED_PRIO_7 27 +#define SYS_COUNT_RX_YELLOW_PRIO_0 28 +#define SYS_COUNT_RX_YELLOW_PRIO_1 29 +#define SYS_COUNT_RX_YELLOW_PRIO_2 30 +#define SYS_COUNT_RX_YELLOW_PRIO_3 31 +#define SYS_COUNT_RX_YELLOW_PRIO_4 32 +#define SYS_COUNT_RX_YELLOW_PRIO_5 33 +#define SYS_COUNT_RX_YELLOW_PRIO_6 34 +#define SYS_COUNT_RX_YELLOW_PRIO_7 35 +#define SYS_COUNT_RX_GREEN_PRIO_0 36 +#define SYS_COUNT_RX_GREEN_PRIO_1 37 +#define SYS_COUNT_RX_GREEN_PRIO_2 38 +#define SYS_COUNT_RX_GREEN_PRIO_3 39 +#define SYS_COUNT_RX_GREEN_PRIO_4 40 +#define SYS_COUNT_RX_GREEN_PRIO_5 41 +#define SYS_COUNT_RX_GREEN_PRIO_6 42 +#define SYS_COUNT_RX_GREEN_PRIO_7 43 +#define SYS_COUNT_RX_ASSEMBLY_ERR 44 +#define SYS_COUNT_RX_SMD_ERR 45 +#define SYS_COUNT_RX_ASSEMBLY_OK 46 +#define SYS_COUNT_RX_MERGE_FRAG 47 +#define SYS_COUNT_RX_PMAC_OCT 48 +#define SYS_COUNT_RX_PMAC_UC 49 +#define SYS_COUNT_RX_PMAC_MC 50 +#define SYS_COUNT_RX_PMAC_BC 51 +#define SYS_COUNT_RX_PMAC_SHORT 52 +#define SYS_COUNT_RX_PMAC_FRAG 53 +#define SYS_COUNT_RX_PMAC_JABBER 54 +#define SYS_COUNT_RX_PMAC_CRC 55 +#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56 +#define SYS_COUNT_RX_PMAC_SZ_64 57 +#define SYS_COUNT_RX_PMAC_SZ_65_127 58 +#define SYS_COUNT_RX_PMAC_SZ_128_255 59 +#define SYS_COUNT_RX_PMAC_SZ_256_511 60 +#define SYS_COUNT_RX_PMAC_SZ_512_1023 61 +#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62 +#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63 +#define SYS_COUNT_RX_PMAC_PAUSE 64 +#define SYS_COUNT_RX_PMAC_CONTROL 65 +#define SYS_COUNT_RX_PMAC_LONG 
66 + +#define SYS_COUNT_TX_OCT 67 +#define SYS_COUNT_TX_UC 68 +#define SYS_COUNT_TX_MC 69 +#define SYS_COUNT_TX_BC 70 +#define SYS_COUNT_TX_COL 71 +#define SYS_COUNT_TX_DROP 72 +#define SYS_COUNT_TX_PAUSE 73 +#define SYS_COUNT_TX_SZ_64 74 +#define SYS_COUNT_TX_SZ_65_127 75 +#define SYS_COUNT_TX_SZ_128_255 76 +#define SYS_COUNT_TX_SZ_256_511 77 +#define SYS_COUNT_TX_SZ_512_1023 78 +#define SYS_COUNT_TX_SZ_1024_1526 79 +#define SYS_COUNT_TX_SZ_JUMBO 80 +#define SYS_COUNT_TX_YELLOW_PRIO_0 81 +#define SYS_COUNT_TX_YELLOW_PRIO_1 82 +#define SYS_COUNT_TX_YELLOW_PRIO_2 83 +#define SYS_COUNT_TX_YELLOW_PRIO_3 84 +#define SYS_COUNT_TX_YELLOW_PRIO_4 85 +#define SYS_COUNT_TX_YELLOW_PRIO_5 86 +#define SYS_COUNT_TX_YELLOW_PRIO_6 87 +#define SYS_COUNT_TX_YELLOW_PRIO_7 88 +#define SYS_COUNT_TX_GREEN_PRIO_0 89 +#define SYS_COUNT_TX_GREEN_PRIO_1 90 +#define SYS_COUNT_TX_GREEN_PRIO_2 91 +#define SYS_COUNT_TX_GREEN_PRIO_3 92 +#define SYS_COUNT_TX_GREEN_PRIO_4 93 +#define SYS_COUNT_TX_GREEN_PRIO_5 94 +#define SYS_COUNT_TX_GREEN_PRIO_6 95 +#define SYS_COUNT_TX_GREEN_PRIO_7 96 +#define SYS_COUNT_TX_AGED 97 +#define SYS_COUNT_TX_LLCT 98 +#define SYS_COUNT_TX_CT 99 +#define SYS_COUNT_TX_MM_HOLD 100 +#define SYS_COUNT_TX_MERGE_FRAG 101 +#define SYS_COUNT_TX_PMAC_OCT 102 +#define SYS_COUNT_TX_PMAC_UC 103 +#define SYS_COUNT_TX_PMAC_MC 104 +#define SYS_COUNT_TX_PMAC_BC 105 +#define SYS_COUNT_TX_PMAC_PAUSE 106 +#define SYS_COUNT_TX_PMAC_SZ_64 107 +#define SYS_COUNT_TX_PMAC_SZ_65_127 108 +#define SYS_COUNT_TX_PMAC_SZ_128_255 109 +#define SYS_COUNT_TX_PMAC_SZ_256_511 110 +#define SYS_COUNT_TX_PMAC_SZ_512_1023 111 +#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112 +#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113 + +#define SYS_COUNT_DR_LOCAL 114 +#define SYS_COUNT_DR_TAIL 115 +#define SYS_COUNT_DR_YELLOW_PRIO_0 116 +#define SYS_COUNT_DR_YELLOW_PRIO_1 117 +#define SYS_COUNT_DR_YELLOW_PRIO_2 118 +#define SYS_COUNT_DR_YELLOW_PRIO_3 119 +#define SYS_COUNT_DR_YELLOW_PRIO_4 120 +#define SYS_COUNT_DR_YELLOW_PRIO_5 121 +#define SYS_COUNT_DR_YELLOW_PRIO_6 122 +#define SYS_COUNT_DR_YELLOW_PRIO_7 123 +#define SYS_COUNT_DR_GREEN_PRIO_0 124 +#define SYS_COUNT_DR_GREEN_PRIO_1 125 +#define SYS_COUNT_DR_GREEN_PRIO_2 126 +#define SYS_COUNT_DR_GREEN_PRIO_3 127 +#define SYS_COUNT_DR_GREEN_PRIO_4 128 +#define SYS_COUNT_DR_GREEN_PRIO_5 129 +#define SYS_COUNT_DR_GREEN_PRIO_6 130 +#define SYS_COUNT_DR_GREEN_PRIO_7 131 + +/* Add a possibly wrapping 32 bit value to a 64 bit counter */ +static void lan966x_add_cnt(u64 *cnt, u32 val) +{ + if (val < (*cnt & U32_MAX)) + *cnt += (u64)1 << 32; /* value has wrapped */ + + *cnt = (*cnt & ~(u64)U32_MAX) + val; +} + +static void lan966x_stats_update(struct lan966x *lan966x) +{ + int i, j; + + mutex_lock(&lan966x->stats_lock); + + for (i = 0; i < lan966x->num_phys_ports; i++) { + uint idx = i * lan966x->num_stats; + + lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i), + lan966x, SYS_STAT_CFG); + + for (j = 0; j < lan966x->num_stats; j++) { + u32 offset = lan966x->stats_layout[j].offset; + + lan966x_add_cnt(&lan966x->stats[idx++], + lan_rd(lan966x, SYS_CNT(offset))); + } + } + + mutex_unlock(&lan966x->stats_lock); +} + +static int lan966x_get_sset_count(struct net_device *dev, int sset) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + return lan966x->num_stats; +} + +static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct lan966x_port *port = netdev_priv(netdev); + struct lan966x *lan966x = 
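lan966x_add_cnt() above folds a free-running 32-bit hardware counter into a 64-bit software accumulator: when the freshly read value is smaller than the stored low word, the counter must have wrapped, so the high word is bumped before the low word is replaced. This stays correct only if the counter cannot wrap twice between reads, which the two-second LAN966X_STATS_CHECK_DELAY poll is there to guarantee. A standalone version with a worked wrap case (add_cnt() and main() are illustrative):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Fold a possibly-wrapped 32-bit reading into a 64-bit counter. */
	static void add_cnt(uint64_t *cnt, uint32_t val)
	{
		if (val < (uint32_t)*cnt)
			*cnt += (uint64_t)1 << 32;	/* value has wrapped */

		*cnt = (*cnt & ~(uint64_t)UINT32_MAX) + val;
	}

	int main(void)
	{
		uint64_t cnt = 0xfffffff0;

		add_cnt(&cnt, 0x10);
		printf("%#" PRIx64 "\n", cnt);	/* prints 0x100000010 */
		return 0;
	}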
port->lan966x; + int i; + + if (sset != ETH_SS_STATS) + return; + + for (i = 0; i < lan966x->num_stats; i++) + memcpy(data + i * ETH_GSTRING_LEN, + lan966x->stats_layout[i].name, ETH_GSTRING_LEN); +} + +static void lan966x_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int i; + + /* check and update now */ + lan966x_stats_update(lan966x); + + /* Copy all counters */ + for (i = 0; i < lan966x->num_stats; i++) + *data++ = lan966x->stats[port->chip_port * + lan966x->num_stats + i]; +} + +static void lan966x_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 idx; + + lan966x_stats_update(lan966x); + + idx = port->chip_port * lan966x->num_stats; + + mutex_lock(&lan966x->stats_lock); + + mac_stats->FramesTransmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_UC] + + lan966x->stats[idx + SYS_COUNT_TX_MC] + + lan966x->stats[idx + SYS_COUNT_TX_BC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; + mac_stats->SingleCollisionFrames = + lan966x->stats[idx + SYS_COUNT_TX_COL]; + mac_stats->MultipleCollisionFrames = 0; + mac_stats->FramesReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_UC] + + lan966x->stats[idx + SYS_COUNT_RX_MC] + + lan966x->stats[idx + SYS_COUNT_RX_BC]; + mac_stats->FrameCheckSequenceErrors = + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC]; + mac_stats->AlignmentErrors = 0; + mac_stats->OctetsTransmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_OCT] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT]; + mac_stats->FramesWithDeferredXmissions = + lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD]; + mac_stats->LateCollisions = 0; + mac_stats->FramesAbortedDueToXSColls = 0; + mac_stats->FramesLostDueToIntMACXmitError = 0; + mac_stats->CarrierSenseErrors = 0; + mac_stats->OctetsReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_OCT]; + mac_stats->FramesLostDueToIntMACRcvError = 0; + mac_stats->MulticastFramesXmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_MC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC]; + mac_stats->BroadcastFramesXmittedOK = + lan966x->stats[idx + SYS_COUNT_TX_BC] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; + mac_stats->FramesWithExcessiveDeferral = 0; + mac_stats->MulticastFramesReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_MC]; + mac_stats->BroadcastFramesReceivedOK = + lan966x->stats[idx + SYS_COUNT_RX_BC]; + mac_stats->InRangeLengthErrors = + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC]; + mac_stats->OutOfRangeLengthField = + lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; + mac_stats->FrameTooLongErrors = + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; + + mutex_unlock(&lan966x->stats_lock); +} + +static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 10239 }, 
+ {} +}; + +static void lan966x_get_eth_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 idx; + + lan966x_stats_update(lan966x); + + idx = port->chip_port * lan966x->num_stats; + + mutex_lock(&lan966x->stats_lock); + + rmon_stats->undersize_pkts = + lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT]; + rmon_stats->oversize_pkts = + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG]; + rmon_stats->fragments = + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG]; + rmon_stats->jabbers = + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER]; + rmon_stats->hist[0] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64]; + rmon_stats->hist[1] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127]; + rmon_stats->hist[2] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255]; + rmon_stats->hist[3] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511]; + rmon_stats->hist[4] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023]; + rmon_stats->hist[5] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526]; + rmon_stats->hist[6] = + lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO]; + + rmon_stats->hist_tx[0] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64]; + rmon_stats->hist_tx[1] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127]; + rmon_stats->hist_tx[2] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255]; + rmon_stats->hist_tx[3] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511]; + rmon_stats->hist_tx[4] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023]; + rmon_stats->hist_tx[5] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526]; + rmon_stats->hist_tx[6] = + lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO]; + + mutex_unlock(&lan966x->stats_lock); + + *ranges = lan966x_rmon_ranges; +} + +static int lan966x_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct lan966x_port *port = netdev_priv(ndev); + + return phylink_ethtool_ksettings_get(port->phylink, cmd); +} + +static int lan966x_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct lan966x_port *port = netdev_priv(ndev); + + return phylink_ethtool_ksettings_set(port->phylink, cmd); +} + +static void lan966x_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct lan966x_port *port = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(port->phylink, pause); +} + +static int lan966x_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct lan966x_port *port = 
netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(port->phylink, pause); +} + +const struct ethtool_ops lan966x_ethtool_ops = { + .get_link_ksettings = lan966x_get_link_ksettings, + .set_link_ksettings = lan966x_set_link_ksettings, + .get_pauseparam = lan966x_get_pauseparam, + .set_pauseparam = lan966x_set_pauseparam, + .get_sset_count = lan966x_get_sset_count, + .get_strings = lan966x_get_strings, + .get_ethtool_stats = lan966x_get_ethtool_stats, + .get_eth_mac_stats = lan966x_get_eth_mac_stats, + .get_rmon_stats = lan966x_get_eth_rmon_stats, + .get_link = ethtool_op_get_link, +}; + +static void lan966x_check_stats_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct lan966x *lan966x = container_of(del_work, struct lan966x, + stats_work); + + lan966x_stats_update(lan966x); + + queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work, + LAN966X_STATS_CHECK_DELAY); +} + +void lan966x_stats_get(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 idx; + int i; + + idx = port->chip_port * lan966x->num_stats; + + mutex_lock(&lan966x->stats_lock); + + stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT]; + + stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] + + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO]; + + stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] + + lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC]; + + stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] + + lan966x->stats[idx + SYS_COUNT_RX_FRAG] + + lan966x->stats[idx + SYS_COUNT_RX_JABBER] + + lan966x->stats[idx + SYS_COUNT_RX_CRC] + + lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] + + lan966x->stats[idx + SYS_COUNT_RX_LONG]; + + stats->rx_dropped = dev->stats.rx_dropped + + lan966x->stats[idx + SYS_COUNT_RX_LONG] + + lan966x->stats[idx + SYS_COUNT_DR_LOCAL] + + lan966x->stats[idx + SYS_COUNT_DR_TAIL]; + + for (i = 0; i < LAN966X_NUM_TC; i++) { + stats->rx_dropped += + (lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] + + lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]); + } + + /* Get Tx stats */ + stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT]; + + stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] + + lan966x->stats[idx + 
SYS_COUNT_TX_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] + + lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO]; + + stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] + + lan966x->stats[idx + SYS_COUNT_TX_AGED]; + + stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL]; + + mutex_unlock(&lan966x->stats_lock); +} + +int lan966x_stats_init(struct lan966x *lan966x) +{ + char queue_name[32]; + + lan966x->stats_layout = lan966x_stats_layout; + lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout); + lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports * + lan966x->num_stats, + sizeof(u64), GFP_KERNEL); + if (!lan966x->stats) + return -ENOMEM; + + /* Init stats worker */ + mutex_init(&lan966x->stats_lock); + snprintf(queue_name, sizeof(queue_name), "%s-stats", + dev_name(lan966x->dev)); + lan966x->stats_queue = create_singlethread_workqueue(queue_name); + if (!lan966x->stats_queue) + return -ENOMEM; + INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work); + queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work, + LAN966X_STATS_CHECK_DELAY); + + return 0; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c new file mode 100644 index 000000000000..da5ca7188679 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/switchdev.h> + +#include "lan966x_main.h" + +struct lan966x_fdb_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + struct lan966x *lan966x; + unsigned long event; +}; + +struct lan966x_fdb_entry { + struct list_head list; + unsigned char mac[ETH_ALEN] __aligned(2); + u16 vid; + u32 references; +}; + +static struct lan966x_fdb_entry * +lan966x_fdb_find_entry(struct lan966x *lan966x, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_fdb_entry *fdb_entry; + + list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) { + if (fdb_entry->vid == fdb_info->vid && + ether_addr_equal(fdb_entry->mac, fdb_info->addr)) + return fdb_entry; + } + + return NULL; +} + +static void lan966x_fdb_add_entry(struct lan966x *lan966x, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_fdb_entry *fdb_entry; + + fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info); + if (fdb_entry) { + fdb_entry->references++; + return; + } + + fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL); + if (!fdb_entry) + return; + + ether_addr_copy(fdb_entry->mac, fdb_info->addr); + fdb_entry->vid = fdb_info->vid; + fdb_entry->references = 1; + list_add_tail(&fdb_entry->list, &lan966x->fdb_entries); +} + +static bool lan966x_fdb_del_entry(struct lan966x *lan966x, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_fdb_entry *fdb_entry, *tmp; + + list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, + list) { + if (fdb_entry->vid == fdb_info->vid && + ether_addr_equal(fdb_entry->mac, fdb_info->addr)) { + fdb_entry->references--; + if (!fdb_entry->references) { 
list_del(&fdb_entry->list); + kfree(fdb_entry); + return true; + } + break; + } + } + + return false; +} + +void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_fdb_entry *fdb_entry; + + list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) { + if (fdb_entry->vid != vid) + continue; + + lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid); + } +} + +void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid) +{ + struct lan966x_fdb_entry *fdb_entry; + + list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) { + if (fdb_entry->vid != vid) + continue; + + lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid); + } +} + +static void lan966x_fdb_purge_entries(struct lan966x *lan966x) +{ + struct lan966x_fdb_entry *fdb_entry, *tmp; + + list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) { + list_del(&fdb_entry->list); + kfree(fdb_entry); + } +} + +int lan966x_fdb_init(struct lan966x *lan966x) +{ + INIT_LIST_HEAD(&lan966x->fdb_entries); + lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0); + if (!lan966x->fdb_work) + return -ENOMEM; + + return 0; +} + +void lan966x_fdb_deinit(struct lan966x *lan966x) +{ + destroy_workqueue(lan966x->fdb_work); + lan966x_fdb_purge_entries(lan966x); +} + +static void lan966x_fdb_event_work(struct work_struct *work) +{ + struct lan966x_fdb_event_work *fdb_work = + container_of(work, struct lan966x_fdb_event_work, work); + struct switchdev_notifier_fdb_info *fdb_info; + struct net_device *dev = fdb_work->dev; + struct lan966x_port *port; + struct lan966x *lan966x; + int ret; + + fdb_info = &fdb_work->fdb_info; + lan966x = fdb_work->lan966x; + + if (lan966x_netdevice_check(dev)) { + port = netdev_priv(dev); + + switch (fdb_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + lan966x_mac_add_entry(lan966x, port, fdb_info->addr, + fdb_info->vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (!fdb_info->added_by_user) + break; + lan966x_mac_del_entry(lan966x, fdb_info->addr, + fdb_info->vid); + break; + } + } else { + if (!netif_is_bridge_master(dev)) + goto out; + + /* The FDB event targets the bridge device itself */ + switch (fdb_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + /* If there is no front port in this VLAN, there is no + * point in copying the frame to the CPU, because it + * would just be dropped later. So only learn the + * address towards the CPU when a front port is in the + * VLAN, but always store the FDB entry, so it can be + * installed once a port actually joins the VLAN. 
+ */ + lan966x_fdb_add_entry(lan966x, fdb_info); + if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, + fdb_info->vid)) + break; + + lan966x_mac_cpu_learn(lan966x, fdb_info->addr, + fdb_info->vid); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + ret = lan966x_fdb_del_entry(lan966x, fdb_info); + if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, + fdb_info->vid)) + break; + + if (ret) + lan966x_mac_cpu_forget(lan966x, fdb_info->addr, + fdb_info->vid); + break; + } + } + +out: + kfree(fdb_work->fdb_info.addr); + kfree(fdb_work); + dev_put(dev); +} + +int lan966x_handle_fdb(struct net_device *dev, + struct net_device *orig_dev, + unsigned long event, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_fdb_event_work *fdb_work; + + if (ctx && ctx != port) + return 0; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (lan966x_netdevice_check(orig_dev) && + !fdb_info->added_by_user) + break; + + fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC); + if (!fdb_work) + return -ENOMEM; + + fdb_work->dev = orig_dev; + fdb_work->lan966x = lan966x; + fdb_work->event = event; + INIT_WORK(&fdb_work->work, lan966x_fdb_event_work); + memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info)); + fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!fdb_work->fdb_info.addr) + goto err_addr_alloc; + + ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr); + dev_hold(orig_dev); + + queue_work(lan966x->fdb_work, &fdb_work->work); + break; + } + + return 0; +err_addr_alloc: + kfree(fdb_work); + return -ENOMEM; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h new file mode 100644 index 000000000000..ca3314789d18 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __LAN966X_IFH_H__ +#define __LAN966X_IFH_H__ + +/* Fields with description (*) should just be cleared upon injection + * IFH is transmitted MSByte first (Highest bit pos sent as MSB of first byte) + */ + +#define IFH_LEN 7 + +/* Timestamp for frame */ +#define IFH_POS_TIMESTAMP 192 + +/* Bypass analyzer with a prefilled IFH */ +#define IFH_POS_BYPASS 191 + +/* Masqueraded injection with masq_port defining logical source port */ +#define IFH_POS_MASQ 190 + +/* Masqueraded port number for injection */ +#define IFH_POS_MASQ_PORT 186 + +/* Frame length (*) */ +#define IFH_POS_LEN 178 + +/* Cell filling mode. Full(0),Etype(1), LlctOpt(2), Llct(3) */ +#define IFH_POS_WRDMODE 176 + +/* Frame has 16 bits rtag removed compared to line data */ +#define IFH_POS_RTAG48 175 + +/* Frame has a redundancy tag */ +#define IFH_POS_HAS_RED_TAG 174 + +/* Frame has been cut through forwarded (*) */ +#define IFH_POS_CUTTHRU 173 + +/* Rewriter command */ +#define IFH_POS_REW_CMD 163 + +/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */ +#define IFH_POS_REW_OAM 162 + +/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN, + * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM). 
+ */
+#define IFH_POS_PDU_TYPE		158
+
+/* Update FCS before transmission */
+#define IFH_POS_FCS_UPD			157
+
+/* Classified DSCP value of frame */
+#define IFH_POS_DSCP			151
+
+/* Yellow indication */
+#define IFH_POS_DP			150
+
+/* Process in RTE/inbound */
+#define IFH_POS_RTE_INB_UPDATE		149
+
+/* Number of tags to pop from frame */
+#define IFH_POS_POP_CNT			147
+
+/* Number of tags in front of the ethertype */
+#define IFH_POS_ETYPE_OFS		145
+
+/* Logical source port of frame (*) */
+#define IFH_POS_SRCPORT			141
+
+/* Sequence number in redundancy tag */
+#define IFH_POS_SEQ_NUM			120
+
+/* Stagd flag and classified TCI of frame (PCP/DEI/VID) */
+#define IFH_POS_TCI			103
+
+/* Classified internal priority for queuing */
+#define IFH_POS_QOS_CLASS		100
+
+/* Bit mask with eight cpu copy classes */
+#define IFH_POS_CPUQ			92
+
+/* Relearn + learn flags (*) */
+#define IFH_POS_LEARN_FLAGS		90
+
+/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */
+#define IFH_POS_SFLOW_ID		86
+
+/* Set if an ACL/S2 rule was hit (*).
+ * Super priority: acl_hit=0 and acl_hit(4)=1.
+ */
+#define IFH_POS_ACL_HIT			85
+
+/* S2 rule index hit (*) */
+#define IFH_POS_ACL_IDX			79
+
+/* ISDX as classified by S1 */
+#define IFH_POS_ISDX			71
+
+/* Destination ports for frame */
+#define IFH_POS_DSTS			62
+
+/* Storm policer to be applied: None/Uni/Multi/Broad (*) */
+#define IFH_POS_FLOOD			60
+
+/* Redundancy tag operation */
+#define IFH_POS_SEQ_OP			58
+
+/* Classified internal priority for resource management, tagging etc */
+#define IFH_POS_IPV			55
+
+/* Frame is for AFI use */
+#define IFH_POS_AFI			54
+
+/* Internal aging value (*) */
+#define IFH_POS_AGED			52
+
+/* RTP Identifier */
+#define IFH_POS_RTP_ID			42
+
+/* RTP MRPD flow */
+#define IFH_POS_RTP_SUBID		41
+
+/* Profinet DataStatus or opcua GroupVersion MSB */
+#define IFH_POS_PN_DATA_STATUS		33
+
+/* Profinet transfer status (1 iff the status is 0) */
+#define IFH_POS_PN_TRANSF_STATUS_ZERO	32
+
+/* Profinet cycle counter or opcua NetworkMessageNumber */
+#define IFH_POS_PN_CC			16
+
+#define IFH_WID_TIMESTAMP		32
+#define IFH_WID_BYPASS			1
+#define IFH_WID_MASQ			1
+#define IFH_WID_MASQ_PORT		4
+#define IFH_WID_LEN			14
+#define IFH_WID_WRDMODE			2
+#define IFH_WID_RTAG48			1
+#define IFH_WID_HAS_RED_TAG		1
+#define IFH_WID_CUTTHRU			1
+#define IFH_WID_REW_CMD			10
+#define IFH_WID_REW_OAM			1
+#define IFH_WID_PDU_TYPE		4
+#define IFH_WID_FCS_UPD			1
+#define IFH_WID_DSCP			6
+#define IFH_WID_DP			1
+#define IFH_WID_RTE_INB_UPDATE		1
+#define IFH_WID_POP_CNT			2
+#define IFH_WID_ETYPE_OFS		2
+#define IFH_WID_SRCPORT			4
+#define IFH_WID_SEQ_NUM			16
+#define IFH_WID_TCI			17
+#define IFH_WID_QOS_CLASS		3
+#define IFH_WID_CPUQ			8
+#define IFH_WID_LEARN_FLAGS		2
+#define IFH_WID_SFLOW_ID		4
+#define IFH_WID_ACL_HIT			1
+#define IFH_WID_ACL_IDX			6
+#define IFH_WID_ISDX			8
+#define IFH_WID_DSTS			9
+#define IFH_WID_FLOOD			2
+#define IFH_WID_SEQ_OP			2
+#define IFH_WID_IPV			3
+#define IFH_WID_AFI			1
+#define IFH_WID_AGED			2
+#define IFH_WID_RTP_ID			10
+#define IFH_WID_RTP_SUBID		1
+#define IFH_WID_PN_DATA_STATUS		8
+#define IFH_WID_PN_TRANSF_STATUS_ZERO	1
+#define IFH_WID_PN_CC			16
+
+#endif /* __LAN966X_IFH_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
new file mode 100644
index 000000000000..ca5f1177963d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+#include "lan966x_main.h"
+
+#define LAN966X_MAC_COLUMNS		4
+#define
MACACCESS_CMD_IDLE 0 +#define MACACCESS_CMD_LEARN 1 +#define MACACCESS_CMD_FORGET 2 +#define MACACCESS_CMD_AGE 3 +#define MACACCESS_CMD_GET_NEXT 4 +#define MACACCESS_CMD_INIT 5 +#define MACACCESS_CMD_READ 6 +#define MACACCESS_CMD_WRITE 7 +#define MACACCESS_CMD_SYNC_GET_NEXT 8 + +#define LAN966X_MAC_INVALID_ROW -1 + +struct lan966x_mac_entry { + struct list_head list; + unsigned char mac[ETH_ALEN] __aligned(2); + u16 vid; + u16 port_index; + int row; +}; + +struct lan966x_mac_raw_entry { + u32 mach; + u32 macl; + u32 maca; + bool processed; +}; + +static int lan966x_mac_get_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, ANA_MACACCESS); +} + +static int lan966x_mac_wait_for_completion(struct lan966x *lan966x) +{ + u32 val; + + return readx_poll_timeout(lan966x_mac_get_status, + lan966x, val, + (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) == + MACACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); +} + +static void lan966x_mac_select(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. + */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + lan_wr(macl, lan966x, ANA_MACLDATA); + lan_wr(mach, lan966x, ANA_MACHDATA); +} + +static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a write command */ + lan_wr(ANA_MACACCESS_VALID_SET(1) | + ANA_MACACCESS_CHANGE2SW_SET(0) | + ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) | + ANA_MACACCESS_DEST_IDX_SET(pgid) | + ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN), + lan966x, ANA_MACACCESS); + + return lan966x_mac_wait_for_completion(lan966x); +} + +/* The mask of the front ports is encoded inside the mac parameter via a call + * to lan966x_mdb_encode_mac(). 
+ */ +int lan966x_mac_ip_learn(struct lan966x *lan966x, + bool cpu_copy, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6); + + return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type); +} + +int lan966x_mac_learn(struct lan966x *lan966x, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED); + + return __lan966x_mac_learn(lan966x, port, false, mac, vid, type); +} + +int lan966x_mac_forget(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a forget command */ + lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET), + lan966x, ANA_MACACCESS); + + return lan966x_mac_wait_for_completion(lan966x); +} + +int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid) +{ + return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED); +} + +int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid) +{ + return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED); +} + +void lan966x_mac_set_ageing(struct lan966x *lan966x, + u32 ageing) +{ + lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2), + ANA_AUTOAGE_AGE_PERIOD, + lan966x, ANA_AUTOAGE); +} + +void lan966x_mac_init(struct lan966x *lan966x) +{ + /* Clear the MAC table */ + lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS); + lan966x_mac_wait_for_completion(lan966x); + + spin_lock_init(&lan966x->mac_lock); + INIT_LIST_HEAD(&lan966x->mac_entries); +} + +static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct lan966x_mac_entry *mac_entry; + + mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL); + if (!mac_entry) + return NULL; + + memcpy(mac_entry->mac, mac, ETH_ALEN); + mac_entry->vid = vid; + mac_entry->port_index = port_index; + mac_entry->row = LAN966X_MAC_INVALID_ROW; + return mac_entry; +} + +static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, + const unsigned char *mac, + u16 vid, u16 port_index) +{ + struct lan966x_mac_entry *res = NULL; + struct lan966x_mac_entry *mac_entry; + + spin_lock(&lan966x->mac_lock); + list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { + if (mac_entry->vid == vid && + ether_addr_equal(mac, mac_entry->mac) && + mac_entry->port_index == port_index) { + res = mac_entry; + break; + } + } + spin_unlock(&lan966x->mac_lock); + + return res; +} + +static int lan966x_mac_lookup(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) +{ + int ret; + + lan966x_mac_select(lan966x, mac, vid); + + /* Issue a read command */ + lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_VALID_SET(1) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ), + lan966x, ANA_MACACCESS); + + ret = lan966x_mac_wait_for_completion(lan966x); + if (ret) + return ret; + + return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS)); +} + +static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type, + const char *mac, u16 vid, + struct net_device *dev) +{ + struct switchdev_notifier_fdb_info info = { 0 }; + + info.addr = mac; + info.vid = vid; + info.offloaded = true; + call_switchdev_notifiers(type, dev, &info.info, NULL); +} + 
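+/* Every MAC table access in this file follows the same pattern: select the
+ * entry through ANA_MACLDATA/ANA_MACHDATA, issue a command through
+ * ANA_MACACCESS and poll until the command field returns to
+ * MACACCESS_CMD_IDLE. As an illustration only (this helper does not exist
+ * in the driver), a manual age sweep of the whole table would reuse the
+ * same pattern with MACACCESS_CMD_AGE and no entry selection:
+ *
+ *	static int lan966x_mac_age_sweep(struct lan966x *lan966x)
+ *	{
+ *		lan_wr(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_AGE),
+ *		       lan966x, ANA_MACACCESS);
+ *		return lan966x_mac_wait_for_completion(lan966x);
+ *	}
+ */
+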
+int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
+			  const unsigned char *addr, u16 vid)
+{
+	struct lan966x_mac_entry *mac_entry;
+
+	if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+		return 0;
+
+	/* If the entry already exists, don't add it again to SW, just update
+	 * HW. The actual HW state must be checked because an entry can be
+	 * learned by HW first: before the interrupt is handled, the frame
+	 * reaches the CPU, which then adds the entry, but without the
+	 * extern_learn flag.
+	 */
+	mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
+	if (mac_entry)
+		return lan966x_mac_learn(lan966x, port->chip_port,
+					 addr, vid, ENTRYTYPE_LOCKED);
+
+	mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
+	if (!mac_entry)
+		return -ENOMEM;
+
+	spin_lock(&lan966x->mac_lock);
+	list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+	spin_unlock(&lan966x->mac_lock);
+
+	lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+	lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+
+	return 0;
+}
+
+int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
+			  u16 vid)
+{
+	struct lan966x_mac_entry *mac_entry, *tmp;
+
+	spin_lock(&lan966x->mac_lock);
+	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+				 list) {
+		if (mac_entry->vid == vid &&
+		    ether_addr_equal(addr, mac_entry->mac)) {
+			lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+					   ENTRYTYPE_LOCKED);
+
+			list_del(&mac_entry->list);
+			kfree(mac_entry);
+		}
+	}
+	spin_unlock(&lan966x->mac_lock);
+
+	return 0;
+}
+
+void lan966x_mac_purge_entries(struct lan966x *lan966x)
+{
+	struct lan966x_mac_entry *mac_entry, *tmp;
+
+	spin_lock(&lan966x->mac_lock);
+	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+				 list) {
+		lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+				   ENTRYTYPE_LOCKED);
+
+		list_del(&mac_entry->list);
+		kfree(mac_entry);
+	}
+	spin_unlock(&lan966x->mac_lock);
+}
+
+static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
+				  unsigned char *mac, u32 vid,
+				  struct net_device *dev)
+{
+	rtnl_lock();
+	lan966x_fdb_call_notifiers(type, mac, vid, dev);
+	rtnl_unlock();
+}
+
+static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
+					  u8 *mac, u16 *vid, u32 *dest_idx)
+{
+	mac[0] = (raw_entry->mach >> 8) & 0xff;
+	mac[1] = (raw_entry->mach >> 0) & 0xff;
+	mac[2] = (raw_entry->macl >> 24) & 0xff;
+	mac[3] = (raw_entry->macl >> 16) & 0xff;
+	mac[4] = (raw_entry->macl >> 8) & 0xff;
+	mac[5] = (raw_entry->macl >> 0) & 0xff;
+
+	*vid = (raw_entry->mach >> 16) & 0xfff;
+	*dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
+}
+
+static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+				    struct lan966x_mac_raw_entry *raw_entries)
+{
+	struct lan966x_mac_entry *mac_entry, *tmp;
+	unsigned char mac[ETH_ALEN] __aligned(2);
+	u32 dest_idx;
+	u32 column;
+	u16 vid;
+
+	spin_lock(&lan966x->mac_lock);
+	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
+		bool found = false;
+
+		if (mac_entry->row != row)
+			continue;
+
+		for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+			/* All the valid entries are at the start of the row,
+			 * so once an invalid entry is found, the rest of the
+			 * columns can be skipped
+			 */
+			if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+				break;
+
+			lan966x_mac_process_raw_entry(&raw_entries[column],
+						      mac, &vid, &dest_idx);
+			WARN_ON(dest_idx > lan966x->num_phys_ports);
+
+			/* If the entry in SW is found, then there is nothing
+			 * to do
+			 */
+			if (mac_entry->vid == vid &&
+			    ether_addr_equal(mac_entry->mac, mac) &&
+			    mac_entry->port_index == dest_idx) {
+				raw_entries[column].processed = true;
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			/* Notify the bridge that the entry doesn't exist
+			 * anymore in the HW and remove the entry from the SW
+			 * list
+			 */
+			lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+					      mac_entry->mac, mac_entry->vid,
+					      lan966x->ports[mac_entry->port_index]->dev);
+
+			list_del(&mac_entry->list);
+			kfree(mac_entry);
+		}
+	}
+	spin_unlock(&lan966x->mac_lock);
+
+	/* Now go over the list of columns and check if any entry was not in
+	 * the SW list; such an entry is new, so the bridge needs to be
+	 * notified about it.
+	 */
+	for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+		/* All the valid entries are at the start of the row, so once
+		 * an invalid entry is found, the rest of the columns can be
+		 * skipped
+		 */
+		if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+			break;
+
+		/* If the entry already exists then don't do anything */
+		if (raw_entries[column].processed)
+			continue;
+
+		lan966x_mac_process_raw_entry(&raw_entries[column],
+					      mac, &vid, &dest_idx);
+		WARN_ON(dest_idx > lan966x->num_phys_ports);
+
+		mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+		if (!mac_entry)
+			return;
+
+		mac_entry->row = row;
+
+		spin_lock(&lan966x->mac_lock);
+		list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+		spin_unlock(&lan966x->mac_lock);
+
+		lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+				      mac, vid, lan966x->ports[dest_idx]->dev);
+	}
+}
+
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
+{
+	struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
+	u32 index, column;
+	bool stop = true;
+	u32 val;
+
+	/* Start the scan from 0, 0 */
+	lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
+	       ANA_MACTINDX_BUCKET_SET(0),
+	       lan966x, ANA_MACTINDX);
+
+	while (1) {
+		lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
+			ANA_MACACCESS_MAC_TABLE_CMD,
+			lan966x, ANA_MACACCESS);
+		lan966x_mac_wait_for_completion(lan966x);
+
+		val = lan_rd(lan966x, ANA_MACTINDX);
+		index = ANA_MACTINDX_M_INDEX_GET(val);
+		column = ANA_MACTINDX_BUCKET_GET(val);
+
+		/* The SYNC-GET-NEXT command returns all the entries (4) of a
+		 * row in which a change happened: a new entry was added, or
+		 * an entry was removed by ageing. It returns all the columns
+		 * of that row and then moves on to the next changed row. The
+		 * stop condition of SYNC-GET-NEXT is reaching row 0, column 3
+		 * 'directly'. So when it first returns row 0 and column 0,
+		 * reading must continue even when row 0 and column 3 are
+		 * reached.
+ */ + if (index == 0 && column == 0) + stop = false; + + if (column == LAN966X_MAC_COLUMNS - 1 && + index == 0 && stop) + break; + + entry[column].mach = lan_rd(lan966x, ANA_MACHDATA); + entry[column].macl = lan_rd(lan966x, ANA_MACLDATA); + entry[column].maca = lan_rd(lan966x, ANA_MACACCESS); + + /* Once all the columns are read process them */ + if (column == LAN966X_MAC_COLUMNS - 1) { + lan966x_mac_irq_process(lan966x, index, entry); + /* A row was processed so it is safe to assume that the + * next row/column can be the stop condition + */ + stop = true; + } + } + + lan_rmw(ANA_ANAINTR_INTR_SET(0), + ANA_ANAINTR_INTR, + lan966x, ANA_ANAINTR); + + return IRQ_HANDLED; +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c new file mode 100644 index 000000000000..2cb70da63db3 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -0,0 +1,1002 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/module.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/of_platform.h> +#include <linux/of_net.h> +#include <linux/packing.h> +#include <linux/phy/phy.h> +#include <linux/reset.h> + +#include "lan966x_main.h" + +#define XTR_EOF_0 0x00000080U +#define XTR_EOF_1 0x01000080U +#define XTR_EOF_2 0x02000080U +#define XTR_EOF_3 0x03000080U +#define XTR_PRUNED 0x04000080U +#define XTR_ABORT 0x05000080U +#define XTR_ESCAPE 0x06000080U +#define XTR_NOT_READY 0x07000080U +#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) + +#define READL_SLEEP_US 10 +#define READL_TIMEOUT_US 100000000 + +#define IO_RANGES 2 + +static const struct of_device_id lan966x_match[] = { + { .compatible = "microchip,lan966x-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, lan966x_match); + +struct lan966x_main_io_resource { + enum lan966x_target id; + phys_addr_t offset; + int range; +}; + +static const struct lan966x_main_io_resource lan966x_main_iomap[] = { + { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */ + { TARGET_ORG, 0, 1 }, /* 0xe2000000 */ + { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */ + { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */ + { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */ + { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */ + { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */ + { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */ + { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */ + { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */ + { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */ + { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */ + { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */ + { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */ + { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */ + { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */ + { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */ + { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */ +}; + +static int lan966x_create_targets(struct platform_device *pdev, + struct lan966x *lan966x) +{ + struct resource *iores[IO_RANGES]; + void __iomem *begin[IO_RANGES]; + int idx; + + /* Initially map the entire range and after that update each target to + * point inside the region at the correct offset. It is possible that + * other devices access the same region so don't add any checks about + * this. 
+ */ + for (idx = 0; idx < IO_RANGES; idx++) { + iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM, + idx); + if (!iores[idx]) { + dev_err(&pdev->dev, "Invalid resource\n"); + return -EINVAL; + } + + begin[idx] = devm_ioremap(&pdev->dev, + iores[idx]->start, + resource_size(iores[idx])); + if (!begin[idx]) { + dev_err(&pdev->dev, "Unable to get registers: %s\n", + iores[idx]->name); + return -ENOMEM; + } + } + + for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) { + const struct lan966x_main_io_resource *iomap = + &lan966x_main_iomap[idx]; + + lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset; + } + + return 0; +} + +static int lan966x_port_set_mac_address(struct net_device *dev, void *p) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + const struct sockaddr *addr = p; + int ret; + + /* Learn the new net device MAC address in the mac table. */ + ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID); + if (ret) + return ret; + + /* Then forget the previous one. */ + ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID); + if (ret) + return ret; + + eth_hw_addr_set(dev, addr->sa_data); + return ret; +} + +static int lan966x_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct lan966x_port *port = netdev_priv(dev); + int ret; + + ret = snprintf(buf, len, "p%d", port->chip_port); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int lan966x_port_open(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + int err; + + /* Enable receiving frames on the port, and activate auto-learning of + * MAC addresses. + */ + lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) | + ANA_PORT_CFG_RECV_ENA_SET(1) | + ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port), + ANA_PORT_CFG_LEARNAUTO | + ANA_PORT_CFG_RECV_ENA | + ANA_PORT_CFG_PORTID_VAL, + lan966x, ANA_PORT_CFG(port->chip_port)); + + err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); + if (err) { + netdev_err(dev, "Could not attach to PHY\n"); + return err; + } + + phylink_start(port->phylink); + + return 0; +} + +static int lan966x_port_stop(struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + + lan966x_port_config_down(port); + phylink_stop(port->phylink); + phylink_disconnect_phy(port->phylink); + + return 0; +} + +static int lan966x_port_inj_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, QS_INJ_STATUS); +} + +static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp) +{ + u32 val; + + return readx_poll_timeout(lan966x_port_inj_status, lan966x, val, + QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp), + READL_SLEEP_US, READL_TIMEOUT_US); +} + +static int lan966x_port_ifh_xmit(struct sk_buff *skb, + __be32 *ifh, + struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + u32 i, count, last; + u8 grp = 0; + u32 val; + int err; + + val = lan_rd(lan966x, QS_INJ_STATUS); + if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) || + (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp))) + return NETDEV_TX_BUSY; + + /* Write start of frame */ + lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | + QS_INJ_CTRL_SOF_SET(1), + lan966x, QS_INJ_CTRL(grp)); + + /* Write IFH header */ + for (i = 0; i < IFH_LEN; ++i) { + /* Wait until the fifo is ready */ + err = lan966x_port_inj_ready(lan966x, grp); + if (err) + return NETDEV_TX_BUSY; + + lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp)); + } 
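+
+	/* The IFH consumed IFH_LEN 32-bit words of the injection FIFO; the
+	 * frame data below goes through the same FIFO, one 32-bit word per
+	 * write, padded up to the 60 byte minimum frame size
+	 * (LAN966X_BUFFER_MIN_SZ) before EOF is signalled.
+	 */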
+
+	/* Write frame */
+	count = DIV_ROUND_UP(skb->len, 4);
+	last = skb->len % 4;
+	for (i = 0; i < count; ++i) {
+		/* Wait until the fifo is ready */
+		err = lan966x_port_inj_ready(lan966x, grp);
+		if (err)
+			return NETDEV_TX_BUSY;
+
+		lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
+	}
+
+	/* Add padding */
+	while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
+		/* Wait until the fifo is ready */
+		err = lan966x_port_inj_ready(lan966x, grp);
+		if (err)
+			return NETDEV_TX_BUSY;
+
+		lan_wr(0, lan966x, QS_INJ_WR(grp));
+		++i;
+	}
+
+	/* Indicate EOF and valid bytes in the last word */
+	lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+	       QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
+					 0 : last) |
+	       QS_INJ_CTRL_EOF_SET(1),
+	       lan966x, QS_INJ_CTRL(grp));
+
+	/* Add dummy CRC */
+	lan_wr(0, lan966x, QS_INJ_WR(grp));
+	skb_tx_timestamp(skb);
+
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
+	dev_consume_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+{
+	packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
+		IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_port(void *ifh, u64 port)
+{
+	packing(ifh, &port, IFH_POS_DSTS + IFH_WID_DSTS - 1,
+		IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_qos_class(void *ifh, u64 qos)
+{
+	packing(ifh, &qos, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
+		IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
+{
+	packing(ifh, &ipv, IFH_POS_IPV + IFH_WID_IPV - 1,
+		IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_vid(void *ifh, u64 vid)
+{
+	packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
+		IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
+}
+
+static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	__be32 ifh[IFH_LEN];
+
+	memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+
+	lan966x_ifh_set_bypass(ifh, 1);
+	lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+	lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+	lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ?
0x7 : skb->priority); + lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb)); + + return lan966x_port_ifh_xmit(skb, ifh, dev); +} + +static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu), + lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + dev->mtu = new_mtu; + + return 0; +} + +static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED); +} + +static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID); +} + +static void lan966x_port_set_rx_mode(struct net_device *dev) +{ + __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync); +} + +static int lan966x_port_get_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + ppid->id_len = sizeof(lan966x->base_mac); + memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len); + + return 0; +} + +static const struct net_device_ops lan966x_port_netdev_ops = { + .ndo_open = lan966x_port_open, + .ndo_stop = lan966x_port_stop, + .ndo_start_xmit = lan966x_port_xmit, + .ndo_change_mtu = lan966x_port_change_mtu, + .ndo_set_rx_mode = lan966x_port_set_rx_mode, + .ndo_get_phys_port_name = lan966x_port_get_phys_port_name, + .ndo_get_stats64 = lan966x_stats_get, + .ndo_set_mac_address = lan966x_port_set_mac_address, + .ndo_get_port_parent_id = lan966x_port_get_parent_id, +}; + +bool lan966x_netdevice_check(const struct net_device *dev) +{ + return dev->netdev_ops == &lan966x_port_netdev_ops; +} + +static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp) +{ + return lan_rd(lan966x, QS_XTR_RD(grp)); +} + +static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp) +{ + u32 val; + + return read_poll_timeout(lan966x_port_xtr_status, val, + val != XTR_NOT_READY, + READL_SLEEP_US, READL_TIMEOUT_US, false, + lan966x, grp); +} + +static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval) +{ + u32 bytes_valid; + u32 val; + int err; + + val = lan_rd(lan966x, QS_XTR_RD(grp)); + if (val == XTR_NOT_READY) { + err = lan966x_port_xtr_ready(lan966x, grp); + if (err) + return -EIO; + } + + switch (val) { + case XTR_ABORT: + return -EIO; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + case XTR_PRUNED: + bytes_valid = XTR_VALID_BYTES(val); + val = lan_rd(lan966x, QS_XTR_RD(grp)); + if (val == XTR_ESCAPE) + *rval = lan_rd(lan966x, QS_XTR_RD(grp)); + else + *rval = val; + + return bytes_valid; + case XTR_ESCAPE: + *rval = lan_rd(lan966x, QS_XTR_RD(grp)); + + return 4; + default: + *rval = val; + + return 4; + } +} + +static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port) +{ + packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1, + IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0); +} + +static void lan966x_ifh_get_len(void *ifh, u64 *len) +{ + packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1, + IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0); +} + +static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + int i, grp = 0, err = 0; + + if (!(lan_rd(lan966x, 
QS_XTR_DATA_PRESENT) & BIT(grp)))
+		return IRQ_NONE;
+
+	do {
+		struct net_device *dev;
+		struct sk_buff *skb;
+		int sz = 0, buf_len;
+		u64 src_port, len;
+		u32 ifh[IFH_LEN];
+		u32 *buf;
+		u32 val;
+
+		for (i = 0; i < IFH_LEN; i++) {
+			err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
+			if (err != 4)
+				goto recover;
+		}
+
+		err = 0;
+
+		lan966x_ifh_get_src_port(ifh, &src_port);
+		lan966x_ifh_get_len(ifh, &len);
+
+		WARN_ON(src_port >= lan966x->num_phys_ports);
+
+		dev = lan966x->ports[src_port]->dev;
+		skb = netdev_alloc_skb(dev, len);
+		if (unlikely(!skb)) {
+			netdev_err(dev, "Unable to allocate sk_buff\n");
+			err = -ENOMEM;
+			break;
+		}
+		buf_len = len - ETH_FCS_LEN;
+		buf = (u32 *)skb_put(skb, buf_len);
+
+		len = 0;
+		do {
+			sz = lan966x_rx_frame_word(lan966x, grp, &val);
+			if (sz < 0) {
+				kfree_skb(skb);
+				goto recover;
+			}
+
+			*buf++ = val;
+			len += sz;
+		} while (len < buf_len);
+
+		/* Read the FCS */
+		sz = lan966x_rx_frame_word(lan966x, grp, &val);
+		if (sz < 0) {
+			kfree_skb(skb);
+			goto recover;
+		}
+
+		/* Adjust the length for the statistics: part of the FCS may
+		 * already have been read by the loop above
+		 */
+		len -= ETH_FCS_LEN - sz;
+
+		if (unlikely(dev->features & NETIF_F_RXFCS)) {
+			buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+			*buf = val;
+		}
+
+		skb->protocol = eth_type_trans(skb, dev);
+
+		if (lan966x->bridge_mask & BIT(src_port))
+			skb->offload_fwd_mark = 1;
+
+		netif_rx_ni(skb);
+		dev->stats.rx_bytes += len;
+		dev->stats.rx_packets++;
+
+recover:
+		if (sz < 0 || err)
+			lan_rd(lan966x, QS_XTR_RD(grp));
+
+	} while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
+{
+	struct lan966x *lan966x = args;
+
+	return lan966x_mac_irq_handler(lan966x);
+}
+
+static void lan966x_cleanup_ports(struct lan966x *lan966x)
+{
+	struct lan966x_port *port;
+	int p;
+
+	for (p = 0; p < lan966x->num_phys_ports; p++) {
+		port = lan966x->ports[p];
+		if (!port)
+			continue;
+
+		if (port->dev)
+			unregister_netdev(port->dev);
+
+		if (port->phylink) {
+			rtnl_lock();
+			lan966x_port_stop(port->dev);
+			rtnl_unlock();
+			phylink_destroy(port->phylink);
+			port->phylink = NULL;
+		}
+
+		if (port->fwnode)
+			fwnode_handle_put(port->fwnode);
+	}
+
+	disable_irq(lan966x->xtr_irq);
+	lan966x->xtr_irq = -ENXIO;
+
+	if (lan966x->ana_irq > 0) {
+		disable_irq(lan966x->ana_irq);
+		lan966x->ana_irq = -ENXIO;
+	}
+}
+
+static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
+			      phy_interface_t phy_mode,
+			      struct fwnode_handle *portnp)
+{
+	struct lan966x_port *port;
+	struct phylink *phylink;
+	struct net_device *dev;
+	int err;
+
+	if (p >= lan966x->num_phys_ports)
+		return -EINVAL;
+
+	dev = devm_alloc_etherdev_mqs(lan966x->dev,
+				      sizeof(struct lan966x_port), 8, 1);
+	if (!dev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(dev, lan966x->dev);
+	port = netdev_priv(dev);
+	port->dev = dev;
+	port->lan966x = lan966x;
+	port->chip_port = p;
+	lan966x->ports[p] = port;
+
+	dev->max_mtu = ETH_MAX_MTU;
+
+	dev->netdev_ops = &lan966x_port_netdev_ops;
+	dev->ethtool_ops = &lan966x_ethtool_ops;
+	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_STAG_TX;
+	dev->needed_headroom = IFH_LEN * sizeof(u32);
+
+	eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
+
+	lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
+			  ENTRYTYPE_LOCKED);
+
+	port->phylink_config.dev = &port->dev->dev;
+	port->phylink_config.type = PHYLINK_NETDEV;
+	port->phylink_pcs.poll = true;
+	port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
+
+	port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+						MAC_SYM_PAUSE | MAC_10 |
+						MAC_100 | MAC_1000FD |
+						MAC_2500FD;
+
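+	/* Advertise every interface mode the integrated MAC supports */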
+	__set_bit(PHY_INTERFACE_MODE_MII,
+		  port->phylink_config.supported_interfaces);
+	__set_bit(PHY_INTERFACE_MODE_GMII,
+		  port->phylink_config.supported_interfaces);
+	__set_bit(PHY_INTERFACE_MODE_SGMII,
+		  port->phylink_config.supported_interfaces);
+	__set_bit(PHY_INTERFACE_MODE_QSGMII,
+		  port->phylink_config.supported_interfaces);
+	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
+		  port->phylink_config.supported_interfaces);
+	__set_bit(PHY_INTERFACE_MODE_2500BASEX,
+		  port->phylink_config.supported_interfaces);
+
+	phylink = phylink_create(&port->phylink_config,
+				 portnp,
+				 phy_mode,
+				 &lan966x_phylink_mac_ops);
+	if (IS_ERR(phylink)) {
+		port->dev = NULL;
+		return PTR_ERR(phylink);
+	}
+
+	port->phylink = phylink;
+	phylink_set_pcs(phylink, &port->phylink_pcs);
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(lan966x->dev, "register_netdev failed\n");
+		return err;
+	}
+
+	lan966x_vlan_port_set_vlan_aware(port, 0);
+	lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+	lan966x_vlan_port_apply(port);
+
+	return 0;
+}
+
+static void lan966x_init(struct lan966x *lan966x)
+{
+	u32 p, i;
+
+	/* MAC table initialization */
+	lan966x_mac_init(lan966x);
+
+	lan966x_vlan_init(lan966x);
+
+	/* Flush queues */
+	lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
+	       GENMASK(1, 0),
+	       lan966x, QS_XTR_FLUSH);
+
+	/* Allow to drain */
+	mdelay(1);
+
+	/* All Queues normal */
+	lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
+	       ~(GENMASK(1, 0)),
+	       lan966x, QS_XTR_FLUSH);
+
+	/* Set MAC age time to default value, the entry is aged after
+	 * 2 * AGE_PERIOD
+	 */
+	lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+	       lan966x, ANA_AUTOAGE);
+
+	/* Disable learning for frames discarded by VLAN ingress filtering */
+	lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
+		ANA_ADVLEARN_VLAN_CHK,
+		lan966x, ANA_ADVLEARN);
+
+	/* Set up frame ageing to "2 sec"; the unit is 6.5 us on lan966x */
+	lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
+	       (20000000 / 65),
+	       lan966x, SYS_FRM_AGING);
+
+	/* Map the 8 CPU extraction queues to CPU port */
+	lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
+
+	/* Do byte-swap and expect status after last data word.
+	 * Extraction: mode: manual extraction | byte swap
+	 */
+	lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+	       QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
+	       lan966x, QS_XTR_GRP_CFG(0));
+
+	/* Injection: mode: manual injection | byte swap */
+	lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+	       QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
+	       lan966x, QS_INJ_GRP_CFG(0));
+
+	lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
+		QS_INJ_CTRL_GAP_SIZE,
+		lan966x, QS_INJ_CTRL(0));
+
+	/* Enable IFH insertion/parsing on CPU ports */
+	lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
+	       SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
+	       lan966x, SYS_PORT_MODE(CPU_PORT));
+
+	/* Setup flooding PGIDs */
+	lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
+	       ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
+	       ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
+	       ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
+	       lan966x, ANA_FLOODING_IPMC);
+
+	/* There are 8 priorities */
+	for (i = 0; i < 8; ++i)
+		lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
+			ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
+			ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
+			ANA_FLOODING_FLD_MULTICAST |
+			ANA_FLOODING_FLD_UNICAST |
+			ANA_FLOODING_FLD_BROADCAST,
+			lan966x, ANA_FLOODING(i));
+
+	for (i = 0; i < PGID_ENTRIES; ++i)
+		/* Set all the entries to obey VLAN_VLAN */
+		lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
+			ANA_PGID_CFG_OBEY_VLAN,
+			lan966x, ANA_PGID_CFG(i));
+
+	for (p = 0;
p < lan966x->num_phys_ports; p++) { + /* Disable bridging by default */ + lan_rmw(ANA_PGID_PGID_SET(0x0), + ANA_PGID_PGID, + lan966x, ANA_PGID(p + PGID_SRC)); + + /* Do not forward BPDU frames to the front ports and copy them + * to CPU + */ + lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p)); + } + + /* Set source buffer size for each priority and each port to 1500 bytes */ + for (i = 0; i <= QSYS_Q_RSRV; ++i) { + lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i)); + lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i)); + } + + /* Enable switching to/from cpu port */ + lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1), + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Configure and enable the CPU port */ + lan_rmw(ANA_PGID_PGID_SET(0), + ANA_PGID_PGID, + lan966x, ANA_PGID(CPU_PORT)); + lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_CPU)); + + /* Multicast to all other ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MC)); + + /* This will be controlled by mrouter ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_MCIPV4)); + + /* Unicast to all other ports */ + lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_UC)); + + /* Broadcast to the CPU port and to other ports */ + lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)), + ANA_PGID_PGID, + lan966x, ANA_PGID(PGID_BC)); + + lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1), + lan966x, REW_PORT_CFG(CPU_PORT)); + + lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1), + ANA_ANAINTR_INTR_ENA, + lan966x, ANA_ANAINTR); +} + +static int lan966x_ram_init(struct lan966x *lan966x) +{ + return lan_rd(lan966x, SYS_RAM_INIT); +} + +static int lan966x_reset_switch(struct lan966x *lan966x) +{ + struct reset_control *switch_reset, *phy_reset; + int val = 0; + int ret; + + switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch"); + if (IS_ERR(switch_reset)) + return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset), + "Could not obtain switch reset"); + + phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy"); + if (IS_ERR(phy_reset)) + return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset), + "Could not obtain phy reset\n"); + + reset_control_reset(switch_reset); + reset_control_reset(phy_reset); + + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG); + lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT); + ret = readx_poll_timeout(lan966x_ram_init, lan966x, + val, (val & BIT(1)) == 0, READL_SLEEP_US, + READL_TIMEOUT_US); + if (ret) + return ret; + + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG); + + return 0; +} + +static int lan966x_probe(struct platform_device *pdev) +{ + struct fwnode_handle *ports, *portnp; + struct lan966x *lan966x; + u8 mac_addr[ETH_ALEN]; + int err, i; + + lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL); + if (!lan966x) + return -ENOMEM; + + platform_set_drvdata(pdev, lan966x); + lan966x->dev = &pdev->dev; + + if (!device_get_mac_address(&pdev->dev, mac_addr)) { + ether_addr_copy(lan966x->base_mac, mac_addr); + } else { + pr_info("MAC addr was not set, use random MAC\n"); + eth_random_addr(lan966x->base_mac); + lan966x->base_mac[5] &= 0xf0; + } + + ports = device_get_named_child_node(&pdev->dev, "ethernet-ports"); + if (!ports) + return dev_err_probe(&pdev->dev, -ENODEV, + "no ethernet-ports child found\n"); + + 
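+	/* Map the register targets and reset the switch core before any
+	 * other switch register is touched.
+	 */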
+	err = lan966x_create_targets(pdev, lan966x);
+	if (err)
+		return dev_err_probe(&pdev->dev, err,
+				     "Failed to create targets\n");
+
+	err = lan966x_reset_switch(lan966x);
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "Reset failed\n");
+
+	i = 0;
+	fwnode_for_each_available_child_node(ports, portnp)
+		++i;
+
+	lan966x->num_phys_ports = i;
+	lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
+				      sizeof(struct lan966x_port *),
+				      GFP_KERNEL);
+	if (!lan966x->ports)
+		return -ENOMEM;
+
+	/* Size of the shared queue system memory */
+	lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
+
+	/* set irq */
+	lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
+	if (lan966x->xtr_irq <= 0)
+		return -EINVAL;
+
+	err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
+					lan966x_xtr_irq_handler, IRQF_ONESHOT,
+					"frame extraction", lan966x);
+	if (err) {
+		pr_err("Unable to use xtr irq\n");
+		return -ENODEV;
+	}
+
+	lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
+	if (lan966x->ana_irq > 0) {
+		err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
+						lan966x_ana_irq_handler, IRQF_ONESHOT,
+						"ana irq", lan966x);
+		if (err)
+			return dev_err_probe(&pdev->dev, err, "Unable to use ana irq\n");
+	}
+
+	/* init switch */
+	lan966x_init(lan966x);
+	err = lan966x_stats_init(lan966x);
+	if (err)
+		return err;
+
+	/* go over the child nodes */
+	fwnode_for_each_available_child_node(ports, portnp) {
+		phy_interface_t phy_mode;
+		struct phy *serdes;
+		u32 p;
+
+		if (fwnode_property_read_u32(portnp, "reg", &p))
+			continue;
+
+		phy_mode = fwnode_get_phy_mode(portnp);
+		err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
+		if (err)
+			goto cleanup_ports;
+
+		/* Read needed configuration */
+		lan966x->ports[p]->config.portmode = phy_mode;
+		lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
+
+		serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
+		if (!IS_ERR(serdes))
+			lan966x->ports[p]->serdes = serdes;
+
+		lan966x_port_init(lan966x->ports[p]);
+	}
+
+	lan966x_mdb_init(lan966x);
+	err = lan966x_fdb_init(lan966x);
+	if (err)
+		goto cleanup_ports;
+
+	return 0;
+
+cleanup_ports:
+	fwnode_handle_put(portnp);
+
+	lan966x_cleanup_ports(lan966x);
+
+	cancel_delayed_work_sync(&lan966x->stats_work);
+	destroy_workqueue(lan966x->stats_queue);
+	mutex_destroy(&lan966x->stats_lock);
+
+	return err;
+}
+
+static int lan966x_remove(struct platform_device *pdev)
+{
+	struct lan966x *lan966x = platform_get_drvdata(pdev);
+
+	lan966x_cleanup_ports(lan966x);
+
+	cancel_delayed_work_sync(&lan966x->stats_work);
+	destroy_workqueue(lan966x->stats_queue);
+	mutex_destroy(&lan966x->stats_lock);
+
+	lan966x_mac_purge_entries(lan966x);
+	lan966x_mdb_deinit(lan966x);
+	lan966x_fdb_deinit(lan966x);
+
+	return 0;
+}
+
+static struct platform_driver lan966x_driver = {
+	.probe = lan966x_probe,
+	.remove = lan966x_remove,
+	.driver = {
+		.name = "lan966x-switch",
+		.of_match_table = lan966x_match,
+	},
+};
+
+static int __init lan966x_switch_driver_init(void)
+{
+	int ret;
+
+	lan966x_register_notifier_blocks();
+
+	ret = platform_driver_register(&lan966x_driver);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	lan966x_unregister_notifier_blocks();
+	return ret;
+}
+
+static void __exit lan966x_switch_driver_exit(void)
+{
+	platform_driver_unregister(&lan966x_driver);
+	lan966x_unregister_notifier_blocks();
+}
+
+module_init(lan966x_switch_driver_init);
+module_exit(lan966x_switch_driver_exit);
+
+MODULE_DESCRIPTION("Microchip LAN966X switch driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h new file mode 100644 index 000000000000..99c6d0a9f946 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef __LAN966X_MAIN_H__ +#define __LAN966X_MAIN_H__ + +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/jiffies.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <net/switchdev.h> + +#include "lan966x_regs.h" +#include "lan966x_ifh.h" + +#define TABLE_UPDATE_SLEEP_US 10 +#define TABLE_UPDATE_TIMEOUT_US 100000 + +#define LAN966X_BUFFER_CELL_SZ 64 +#define LAN966X_BUFFER_MEMORY (160 * 1024) +#define LAN966X_BUFFER_MIN_SZ 60 + +#define PGID_AGGR 64 +#define PGID_SRC 80 +#define PGID_ENTRIES 89 + +#define UNAWARE_PVID 0 +#define HOST_PVID 4095 + +/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */ +#define QSYS_Q_RSRV 95 + +#define CPU_PORT 8 + +/* Reserved PGIDs */ +#define PGID_CPU (PGID_AGGR - 6) +#define PGID_UC (PGID_AGGR - 5) +#define PGID_BC (PGID_AGGR - 4) +#define PGID_MC (PGID_AGGR - 3) +#define PGID_MCIPV4 (PGID_AGGR - 2) +#define PGID_MCIPV6 (PGID_AGGR - 1) + +/* Non-reserved PGIDs, used for general purpose */ +#define PGID_GP_START (CPU_PORT + 1) +#define PGID_GP_END PGID_CPU + +#define LAN966X_SPEED_NONE 0 +#define LAN966X_SPEED_2500 1 +#define LAN966X_SPEED_1000 1 +#define LAN966X_SPEED_100 2 +#define LAN966X_SPEED_10 3 + +/* MAC table entry types. + * ENTRYTYPE_NORMAL is subject to aging. + * ENTRYTYPE_LOCKED is not subject to aging. + * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. + * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. 
+ */
+enum macaccess_entry_type {
+	ENTRYTYPE_NORMAL = 0,
+	ENTRYTYPE_LOCKED,
+	ENTRYTYPE_MACV4,
+	ENTRYTYPE_MACV6,
+};
+
+struct lan966x_port;
+
+struct lan966x_stat_layout {
+	u32 offset;
+	char name[ETH_GSTRING_LEN];
+};
+
+struct lan966x {
+	struct device *dev;
+
+	u8 num_phys_ports;
+	struct lan966x_port **ports;
+
+	void __iomem *regs[NUM_TARGETS];
+
+	int shared_queue_sz;
+
+	u8 base_mac[ETH_ALEN];
+
+	struct net_device *bridge;
+	u16 bridge_mask;
+	u16 bridge_fwd_mask;
+
+	struct list_head mac_entries;
+	spinlock_t mac_lock; /* lock for mac_entries list */
+
+	u16 vlan_mask[VLAN_N_VID];
+	DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID);
+
+	/* stats */
+	const struct lan966x_stat_layout *stats_layout;
+	u32 num_stats;
+
+	/* workqueue for reading stats */
+	struct mutex stats_lock;
+	u64 *stats;
+	struct delayed_work stats_work;
+	struct workqueue_struct *stats_queue;
+
+	/* interrupts */
+	int xtr_irq;
+	int ana_irq;
+
+	/* workqueue for fdb */
+	struct workqueue_struct *fdb_work;
+	struct list_head fdb_entries;
+
+	/* mdb */
+	struct list_head mdb_entries;
+	struct list_head pgid_entries;
+};
+
+struct lan966x_port_config {
+	phy_interface_t portmode;
+	const unsigned long *advertising;
+	int speed;
+	int duplex;
+	u32 pause;
+	bool inband;
+	bool autoneg;
+};
+
+struct lan966x_port {
+	struct net_device *dev;
+	struct lan966x *lan966x;
+
+	u8 chip_port;
+	u16 pvid;
+	u16 vid;
+	bool vlan_aware;
+
+	bool learn_ena;
+
+	struct phylink_config phylink_config;
+	struct phylink_pcs phylink_pcs;
+	struct lan966x_port_config config;
+	struct phylink *phylink;
+	struct phy *serdes;
+	struct fwnode_handle *fwnode;
+};
+
+extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
+extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
+extern const struct ethtool_ops lan966x_ethtool_ops;
+
+bool lan966x_netdevice_check(const struct net_device *dev);
+
+void lan966x_register_notifier_blocks(void);
+void lan966x_unregister_notifier_blocks(void);
+
+void lan966x_stats_get(struct net_device *dev,
+		       struct rtnl_link_stats64 *stats);
+int lan966x_stats_init(struct lan966x *lan966x);
+
+void lan966x_port_config_down(struct lan966x_port *port);
+void lan966x_port_config_up(struct lan966x_port *port);
+void lan966x_port_status_get(struct lan966x_port *port,
+			     struct phylink_link_state *state);
+int lan966x_port_pcs_set(struct lan966x_port *port,
+			 struct lan966x_port_config *config);
+void lan966x_port_init(struct lan966x_port *port);
+
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+			 bool cpu_copy,
+			 const unsigned char mac[ETH_ALEN],
+			 unsigned int vid,
+			 enum macaccess_entry_type type);
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+		      const unsigned char mac[ETH_ALEN],
+		      unsigned int vid,
+		      enum macaccess_entry_type type);
+int lan966x_mac_forget(struct lan966x *lan966x,
+		       const unsigned char mac[ETH_ALEN],
+		       unsigned int vid,
+		       enum macaccess_entry_type type);
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
+void lan966x_mac_init(struct lan966x *lan966x);
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+			    u32 ageing);
+int lan966x_mac_del_entry(struct lan966x *lan966x,
+			  const unsigned char *addr,
+			  u16 vid);
+int lan966x_mac_add_entry(struct lan966x *lan966x,
+			  struct lan966x_port *port,
+			  const unsigned char *addr,
+			  u16 vid);
+void lan966x_mac_purge_entries(struct lan966x *lan966x);
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
+
+void
lan966x_vlan_init(struct lan966x *lan966x); +void lan966x_vlan_port_apply(struct lan966x_port *port); +bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid); +void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port, + bool vlan_aware); +int lan966x_vlan_port_set_vid(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged); +void lan966x_vlan_port_add_vlan(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged); +void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid); +void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid); +void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid); + +void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid); +void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid); +int lan966x_fdb_init(struct lan966x *lan966x); +void lan966x_fdb_deinit(struct lan966x *lan966x); +int lan966x_handle_fdb(struct net_device *dev, + struct net_device *orig_dev, + unsigned long event, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info); + +void lan966x_mdb_init(struct lan966x *lan966x); +void lan966x_mdb_deinit(struct lan966x *lan966x); +int lan966x_handle_port_mdb_add(struct lan966x_port *port, + const struct switchdev_obj *obj); +int lan966x_handle_port_mdb_del(struct lan966x_port *port, + const struct switchdev_obj *obj); +void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid); +void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid); + +static inline void __iomem *lan_addr(void __iomem *base[], + int id, int tinst, int tcnt, + int gbase, int ginst, + int gcnt, int gwidth, + int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON((tinst) >= tcnt); + WARN_ON((ginst) >= gcnt); + WARN_ON((rinst) >= rcnt); + return base[id + (tinst)] + + gbase + ((ginst) * gwidth) + + raddr + ((rinst) * rwidth); +} + +static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +static inline void lan_wr(u32 val, struct lan966x *lan966x, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + writel(val, lan_addr(lan966x->regs, id, tinst, tcnt, + gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth)); +} + +static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x, + int id, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 nval; + + nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); + nval = (nval & ~mask) | (val & mask); + writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst, + gcnt, gwidth, raddr, rinst, rcnt, rwidth)); +} + +#endif /* __LAN966X_MAIN_H__ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c new file mode 100644 index 000000000000..c68d0a99d292 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <net/switchdev.h> + +#include "lan966x_main.h" + +struct lan966x_pgid_entry { + struct list_head list; + int index; + refcount_t refcount; + u16 ports; +}; + +struct lan966x_mdb_entry { + struct list_head list; + unsigned char 
mac[ETH_ALEN]; + u16 vid; + u16 ports; + struct lan966x_pgid_entry *pgid; + u8 cpu_copy; +}; + +void lan966x_mdb_init(struct lan966x *lan966x) +{ + INIT_LIST_HEAD(&lan966x->mdb_entries); + INIT_LIST_HEAD(&lan966x->pgid_entries); +} + +static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x) +{ + struct lan966x_mdb_entry *mdb_entry, *tmp; + + list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) { + list_del(&mdb_entry->list); + kfree(mdb_entry); + } +} + +static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x) +{ + struct lan966x_pgid_entry *pgid_entry, *tmp; + + list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) { + list_del(&pgid_entry->list); + kfree(pgid_entry); + } +} + +void lan966x_mdb_deinit(struct lan966x *lan966x) +{ + lan966x_mdb_purge_mdb_entries(lan966x); + lan966x_mdb_purge_pgid_entries(lan966x); +} + +static struct lan966x_mdb_entry * +lan966x_mdb_entry_get(struct lan966x *lan966x, + const unsigned char *mac, + u16 vid) +{ + struct lan966x_mdb_entry *mdb_entry; + + list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) { + if (ether_addr_equal(mdb_entry->mac, mac) && + mdb_entry->vid == vid) + return mdb_entry; + } + + return NULL; +} + +static struct lan966x_mdb_entry * +lan966x_mdb_entry_add(struct lan966x *lan966x, + const struct switchdev_obj_port_mdb *mdb) +{ + struct lan966x_mdb_entry *mdb_entry; + + mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL); + if (!mdb_entry) + return ERR_PTR(-ENOMEM); + + ether_addr_copy(mdb_entry->mac, mdb->addr); + mdb_entry->vid = mdb->vid; + + list_add_tail(&mdb_entry->list, &lan966x->mdb_entries); + + return mdb_entry; +} + +static void lan966x_mdb_encode_mac(unsigned char *mac, + struct lan966x_mdb_entry *mdb_entry, + enum macaccess_entry_type type) +{ + ether_addr_copy(mac, mdb_entry->mac); + + if (type == ENTRYTYPE_MACV4) { + mac[0] = 0; + mac[1] = mdb_entry->ports >> 8; + mac[2] = mdb_entry->ports & 0xff; + } else if (type == ENTRYTYPE_MACV6) { + mac[0] = mdb_entry->ports >> 8; + mac[1] = mdb_entry->ports & 0xff; + } +} + +static int lan966x_mdb_ip_add(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + bool cpu_copy = false; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) { + mdb_entry = lan966x_mdb_entry_add(lan966x, mdb); + if (IS_ERR(mdb_entry)) + return PTR_ERR(mdb_entry); + } else { + lan966x_mdb_encode_mac(mac, mdb_entry, type); + lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type); + } + + if (cpu_port) + mdb_entry->cpu_copy++; + else + mdb_entry->ports |= BIT(port->chip_port); + + /* Copy the frame to CPU only if the CPU is in the VLAN */ + if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) && + mdb_entry->cpu_copy) + cpu_copy = true; + + lan966x_mdb_encode_mac(mac, mdb_entry, type); + return lan966x_mac_ip_learn(lan966x, cpu_copy, + mac, mdb_entry->vid, type); +} + +static int lan966x_mdb_ip_del(struct lan966x_port *port, + const struct switchdev_obj_port_mdb *mdb, + enum macaccess_entry_type type) +{ + bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_mdb_entry *mdb_entry; + unsigned char mac[ETH_ALEN]; + u16 ports; + + mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid); + if (!mdb_entry) + 
		return -ENOENT;
+
+	ports = mdb_entry->ports;
+	if (cpu_port) {
+		/* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+		 */
+		mdb_entry->cpu_copy--;
+		if (mdb_entry->cpu_copy)
+			return 0;
+	} else {
+		ports &= ~BIT(port->chip_port);
+	}
+
+	lan966x_mdb_encode_mac(mac, mdb_entry, type);
+	lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+	mdb_entry->ports = ports;
+
+	if (!mdb_entry->ports && !mdb_entry->cpu_copy) {
+		list_del(&mdb_entry->list);
+		kfree(mdb_entry);
+		return 0;
+	}
+
+	lan966x_mdb_encode_mac(mac, mdb_entry, type);
+	return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy,
+				    mac, mdb_entry->vid, type);
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports)
+{
+	struct lan966x_pgid_entry *pgid_entry;
+
+	pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL);
+	if (!pgid_entry)
+		return ERR_PTR(-ENOMEM);
+
+	pgid_entry->ports = ports;
+	pgid_entry->index = index;
+	refcount_set(&pgid_entry->refcount, 1);
+
+	list_add_tail(&pgid_entry->list, &lan966x->pgid_entries);
+
+	return pgid_entry;
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_get(struct lan966x *lan966x,
+		       struct lan966x_mdb_entry *mdb_entry)
+{
+	struct lan966x_pgid_entry *pgid_entry;
+	int index;
+
+	/* Try to find an existing pgid that uses the same ports as the
+	 * mdb_entry
+	 */
+	list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+		if (pgid_entry->ports == mdb_entry->ports) {
+			refcount_inc(&pgid_entry->refcount);
+			return pgid_entry;
+		}
+	}
+
+	/* Otherwise allocate the first unused pgid entry; if there is
+	 * none, there are no more resources available
+	 */
+	for (index = PGID_GP_START; index < PGID_GP_END; index++) {
+		bool used = false;
+
+		list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+			if (pgid_entry->index == index) {
+				used = true;
+				break;
+			}
+		}
+
+		if (!used)
+			return lan966x_pgid_entry_add(lan966x, index,
+						      mdb_entry->ports);
+	}
+
+	return ERR_PTR(-ENOSPC);
+}
+
+static void lan966x_pgid_entry_del(struct lan966x *lan966x,
+				   struct lan966x_pgid_entry *pgid_entry)
+{
+	if (!refcount_dec_and_test(&pgid_entry->refcount))
+		return;
+
+	list_del(&pgid_entry->list);
+	kfree(pgid_entry);
+}
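Editor's note: the general-purpose PGIDs above are a small shared pool, reference-counted so that two MDB groups with the same destination port mask land on the same hardware entry. A minimal standalone sketch of that reuse-or-allocate policy, using a plain array instead of the kernel's list_head; the PGID_GP_START/PGID_GP_END values and the main() harness are illustrative placeholders, not the driver's:

/* Illustrative userspace sketch, not part of the patch */
#include <stdio.h>

#define PGID_GP_START 32	/* assumed bounds, for illustration only */
#define PGID_GP_END   56

struct pgid {
	int index;		/* hardware PGID table index */
	int refcount;		/* how many MDB entries share it */
	unsigned short ports;	/* destination port mask */
	int used;
};

static struct pgid table[PGID_GP_END - PGID_GP_START];

/* Reuse an entry with an identical port mask, else take a free index */
static struct pgid *pgid_get(unsigned short ports)
{
	int i;

	for (i = 0; i < PGID_GP_END - PGID_GP_START; i++)
		if (table[i].used && table[i].ports == ports) {
			table[i].refcount++;
			return &table[i];
		}

	for (i = 0; i < PGID_GP_END - PGID_GP_START; i++)
		if (!table[i].used) {
			table[i] = (struct pgid){
				.index = PGID_GP_START + i,
				.refcount = 1,
				.ports = ports,
				.used = 1,
			};
			return &table[i];
		}

	return NULL;	/* pool exhausted: the driver returns -ENOSPC */
}

int main(void)
{
	struct pgid *a = pgid_get(0x0003);	/* ports 0 and 1 */
	struct pgid *b = pgid_get(0x0003);	/* same mask: shared entry */

	printf("index %d refcount %d shared %d\n",
	       a->index, a->refcount, a == b);
	return 0;
}

+
+static int lan966x_mdb_l2_add(struct lan966x_port *port,
+			      const struct switchdev_obj_port_mdb *mdb,
+			      enum macaccess_entry_type type)
+{
+	bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_pgid_entry *pgid_entry;
+	struct lan966x_mdb_entry *mdb_entry;
+	unsigned char mac[ETH_ALEN];
+
+	mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+	if (!mdb_entry) {
+		mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+		if (IS_ERR(mdb_entry))
+			return PTR_ERR(mdb_entry);
+	} else {
+		lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+		lan966x_mdb_encode_mac(mac, mdb_entry, type);
+		lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+	}
+
+	if (cpu_port) {
+		mdb_entry->ports |= BIT(CPU_PORT);
+		mdb_entry->cpu_copy++;
+	} else {
+		mdb_entry->ports |= BIT(port->chip_port);
+	}
+
+	pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+	if (IS_ERR(pgid_entry)) {
+		list_del(&mdb_entry->list);
+		kfree(mdb_entry);
+		return PTR_ERR(pgid_entry);
+	}
+	mdb_entry->pgid = pgid_entry;
+
+	/* Copy the frame to CPU only if the CPU is in the VLAN */
+	if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+	    mdb_entry->cpu_copy)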
+		mdb_entry->ports &= ~BIT(CPU_PORT);
+
+	lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+		ANA_PGID_PGID,
+		lan966x, ANA_PGID(pgid_entry->index));
+
+	return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+				 mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_l2_del(struct lan966x_port *port,
+			      const struct switchdev_obj_port_mdb *mdb,
+			      enum macaccess_entry_type type)
+{
+	bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_pgid_entry *pgid_entry;
+	struct lan966x_mdb_entry *mdb_entry;
+	unsigned char mac[ETH_ALEN];
+	u16 ports;
+
+	mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+	if (!mdb_entry)
+		return -ENOENT;
+
+	ports = mdb_entry->ports;
+	if (cpu_port) {
+		/* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+		 */
+		mdb_entry->cpu_copy--;
+		if (mdb_entry->cpu_copy)
+			return 0;
+
+		ports &= ~BIT(CPU_PORT);
+	} else {
+		ports &= ~BIT(port->chip_port);
+	}
+
+	lan966x_mdb_encode_mac(mac, mdb_entry, type);
+	lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+	lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+
+	mdb_entry->ports = ports;
+
+	if (!mdb_entry->ports) {
+		list_del(&mdb_entry->list);
+		kfree(mdb_entry);
+		return 0;
+	}
+
+	pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+	if (IS_ERR(pgid_entry)) {
+		list_del(&mdb_entry->list);
+		kfree(mdb_entry);
+		return PTR_ERR(pgid_entry);
+	}
+	mdb_entry->pgid = pgid_entry;
+
+	lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+		ANA_PGID_PGID,
+		lan966x, ANA_PGID(pgid_entry->index));
+
+	return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+				 mdb_entry->vid, type);
+}
+
+static enum macaccess_entry_type
+lan966x_mdb_classify(const unsigned char *mac)
+{
+	if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e)
+		return ENTRYTYPE_MACV4;
+	if (mac[0] == 0x33 && mac[1] == 0x33)
+		return ENTRYTYPE_MACV6;
+	return ENTRYTYPE_LOCKED;
+}
+
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+				const struct switchdev_obj *obj)
+{
+	const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+	enum macaccess_entry_type type;
+
+	/* The entries are added differently for ipv4/ipv6 and for l2. The
+	 * reason is that ipv4/ipv6 entries don't require any pgid entry,
+	 * while l2 entries do
+	 */
+	type = lan966x_mdb_classify(mdb->addr);
+	if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+		return lan966x_mdb_ip_add(port, mdb, type);
+
+	return lan966x_mdb_l2_add(port, mdb, type);
+}
+
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+				const struct switchdev_obj *obj)
+{
+	const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+	enum macaccess_entry_type type;
+
+	/* The entries are removed differently for ipv4/ipv6 and for l2. The
+	 * reason is that ipv4/ipv6 entries don't require any pgid
+	 * entry, while l2 entries do
+	 */
+	type = lan966x_mdb_classify(mdb->addr);
+	if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+		return lan966x_mdb_ip_del(port, mdb, type);
+
+	return lan966x_mdb_l2_del(port, mdb, type);
+}
+
+static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x,
+				    struct lan966x_mdb_entry *mdb_entry,
+				    enum macaccess_entry_type type)
+{
+	unsigned char mac[ETH_ALEN];
+
+	lan966x_mdb_encode_mac(mac, mdb_entry, type);
+	lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+	lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x,
+				    struct lan966x_mdb_entry *mdb_entry,
+				    enum macaccess_entry_type type)
+{
+	struct lan966x_pgid_entry *pgid_entry;
+	unsigned char mac[ETH_ALEN];
+
+	lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+	lan966x_mdb_encode_mac(mac, mdb_entry, type);
+	lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+	mdb_entry->ports |= BIT(CPU_PORT);
+
+	pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+	if (IS_ERR(pgid_entry))
+		return;
+
+	mdb_entry->pgid = pgid_entry;
+
+	lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+		ANA_PGID_PGID,
+		lan966x, ANA_PGID(pgid_entry->index));
+
+	lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+			  mdb_entry->vid, type);
+}
+
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+	struct lan966x_mdb_entry *mdb_entry;
+	enum macaccess_entry_type type;
+
+	list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+		if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+			continue;
+
+		type = lan966x_mdb_classify(mdb_entry->mac);
+		if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+			lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type);
+		else
+			lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type);
+	}
+}
+
+static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x,
+				      struct lan966x_mdb_entry *mdb_entry,
+				      enum macaccess_entry_type type)
+{
+	unsigned char mac[ETH_ALEN];
+
+	lan966x_mdb_encode_mac(mac, mdb_entry, type);
+	lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+	lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x,
+				      struct lan966x_mdb_entry *mdb_entry,
+				      enum macaccess_entry_type type)
+{
+	struct lan966x_pgid_entry *pgid_entry;
+	unsigned char mac[ETH_ALEN];
+
+	lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+	lan966x_mdb_encode_mac(mac, mdb_entry, type);
+	lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+	mdb_entry->ports &= ~BIT(CPU_PORT);
+
+	pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+	if (IS_ERR(pgid_entry))
+		return;
+
+	mdb_entry->pgid = pgid_entry;
+
+	lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+		ANA_PGID_PGID,
+		lan966x, ANA_PGID(pgid_entry->index));
+
+	lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+			  mdb_entry->vid, type);
+}
+
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+	struct lan966x_mdb_entry *mdb_entry;
+	enum macaccess_entry_type type;
+
+	list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+		if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+			continue;
+
+		type = lan966x_mdb_classify(mdb_entry->mac);
+		if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+			lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type);
+		else
+			lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
+	}
+}
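Editor's note: the classification and MAC encoding used throughout this file can be exercised in isolation. A standalone sketch mirroring lan966x_mdb_classify() and lan966x_mdb_encode_mac(): IPv4 multicast groups (01:00:5e:...) and IPv6 multicast groups (33:33:...) have their leading, type-implied MAC bytes reused to carry the destination port mask. All names below are local to the sketch, not the driver's:

/* Illustrative userspace sketch, not part of the patch */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

enum entry_type { TYPE_LOCKED, TYPE_MACV4, TYPE_MACV6 };

static enum entry_type classify(const unsigned char *mac)
{
	if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e)
		return TYPE_MACV4;	/* IPv4 multicast OUI 01:00:5e */
	if (mac[0] == 0x33 && mac[1] == 0x33)
		return TYPE_MACV6;	/* IPv6 multicast prefix 33:33 */
	return TYPE_LOCKED;
}

static void encode(unsigned char *out, const unsigned char *mac,
		   unsigned short ports, enum entry_type type)
{
	memcpy(out, mac, ETH_ALEN);
	if (type == TYPE_MACV4) {	/* bytes 0-2 implied by the type */
		out[0] = 0;
		out[1] = ports >> 8;
		out[2] = ports & 0xff;
	} else if (type == TYPE_MACV6) {	/* bytes 0-1 implied */
		out[0] = ports >> 8;
		out[1] = ports & 0xff;
	}
}

int main(void)
{
	const unsigned char group[ETH_ALEN] = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };	/* e.g. mDNS */
	unsigned char hw[ETH_ALEN];

	encode(hw, group, 0x0005, classify(group));	/* ports 0 and 2 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]);
	return 0;
}

diff --git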
a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c new file mode 100644 index 000000000000..b66a9aa00ea4 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/module.h> +#include <linux/phylink.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/phy/phy.h> +#include <linux/sfp.h> + +#include "lan966x_main.h" + +static void lan966x_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static int lan966x_phylink_mac_prepare(struct phylink_config *config, + unsigned int mode, + phy_interface_t iface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + int err; + + if (port->serdes) { + err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, + iface); + if (err) { + netdev_err(to_net_dev(config->dev), + "Could not set mode of SerDes\n"); + return err; + } + } + + return 0; +} + +static void lan966x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + struct lan966x_port_config *port_config = &port->config; + + port_config->duplex = duplex; + port_config->speed = speed; + port_config->pause = 0; + port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0; + port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0; + + lan966x_port_config_up(port); +} + +static void lan966x_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct lan966x_port *port = netdev_priv(to_net_dev(config->dev)); + struct lan966x *lan966x = port->lan966x; + + lan966x_port_config_down(port); + + /* Take PCS out of reset */ + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); +} + +static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct lan966x_port, phylink_pcs); +} + +static void lan966x_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct lan966x_port *port = lan966x_pcs_to_port(pcs); + + lan966x_port_status_get(port, state); +} + +static int lan966x_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct lan966x_port *port = lan966x_pcs_to_port(pcs); + struct lan966x_port_config config; + int ret; + + config = port->config; + config.portmode = interface; + config.inband = phylink_autoneg_inband(mode); + config.autoneg = phylink_test(advertising, Autoneg); + config.advertising = advertising; + + ret = lan966x_port_pcs_set(port, &config); + if (ret) + netdev_err(port->dev, "port PCS config failed: %d\n", ret); + + return ret; +} + +static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs) +{ + /* Currently not used */ +} + +const struct phylink_mac_ops lan966x_phylink_mac_ops = { + .validate = phylink_generic_validate, + .mac_config = lan966x_phylink_mac_config, + .mac_prepare = lan966x_phylink_mac_prepare, + .mac_link_down = lan966x_phylink_mac_link_down, + .mac_link_up = lan966x_phylink_mac_link_up, +}; + +const struct phylink_pcs_ops lan966x_phylink_pcs_ops = { + .pcs_get_state = 
lan966x_pcs_get_state,
+	.pcs_config = lan966x_pcs_config,
+	.pcs_an_restart = lan966x_pcs_aneg_restart,
+};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
new file mode 100644
index 000000000000..237555845a52
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+/* Watermark encode */
+#define MULTIPLIER_BIT BIT(8)
+static u32 lan966x_wm_enc(u32 value)
+{
+	value /= LAN966X_BUFFER_CELL_SZ;
+
+	if (value >= MULTIPLIER_BIT) {
+		value /= 16;
+		if (value >= MULTIPLIER_BIT)
+			value = (MULTIPLIER_BIT - 1);
+
+		value |= MULTIPLIER_BIT;
+	}
+
+	return value;
+}
+
+static void lan966x_port_link_down(struct lan966x_port *port)
+{
+	struct lan966x *lan966x = port->lan966x;
+	u32 val, delay = 0;
+
+	/* 0.5: Disable any AFI */
+	lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) |
+		AFI_PORT_CFG_FRM_OUT_MAX_SET(0),
+		AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+		AFI_PORT_CFG_FRM_OUT_MAX,
+		lan966x, AFI_PORT_CFG(port->chip_port));
+
+	/* wait for reg afi_port_frm_out to become 0 for the port */
+	while (true) {
+		val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port));
+		if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))
+			break;
+
+		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+		delay++;
+		if (delay == 2000) {
+			pr_err("AFI timeout chip port %u", port->chip_port);
+			break;
+		}
+	}
+
+	delay = 0;
+
+	/* 1: Reset the PCS Rx clock domain */
+	lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1),
+		DEV_CLOCK_CFG_PCS_RX_RST,
+		lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+	/* 2: Disable MAC frame reception */
+	lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0),
+		DEV_MAC_ENA_CFG_RX_ENA,
+		lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+	/* 3: Disable traffic being sent to or from switch port */
+	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+		QSYS_SW_PORT_MODE_PORT_ENA,
+		lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+	/* 4: Disable dequeuing from the egress queues */
+	lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1),
+		QSYS_PORT_MODE_DEQUEUE_DIS,
+		lan966x, QSYS_PORT_MODE(port->chip_port));
+
+	/* 5: Disable Flowcontrol */
+	lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0),
+		SYS_PAUSE_CFG_PAUSE_ENA,
+		lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+	/* 5.1: Disable PFC */
+	lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0),
+		QSYS_SW_PORT_MODE_TX_PFC_ENA,
+		lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+	/* 6: Wait a worst case time 8ms (jumbo/10Mbit) */
+	usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC);
+
+	/* 7: Disable HDX backpressure */
+	lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0),
+		SYS_FRONT_PORT_MODE_HDX_MODE,
+		lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
+
+	/* 8: Flush the queues associated with the port */
+	lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
+		QSYS_SW_PORT_MODE_AGING_MODE,
+		lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+	/* 9: Enable dequeuing from the egress queues */
+	lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0),
+		QSYS_PORT_MODE_DEQUEUE_DIS,
+		lan966x, QSYS_PORT_MODE(port->chip_port));
+
+	/* 10: Wait until flushing is complete */
+	while (true) {
+		val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port));
+		if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val))
+			break;
+
+		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+		delay++;
+		if (delay == 2000) {
+			pr_err("Flush timeout chip port %u", port->chip_port);
+			break;
+		}
+	}
+
+	/* 11: Reset the Port and MAC clock domains */
+	lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0),
DEV_MAC_ENA_CFG_TX_ENA, + lan966x, DEV_MAC_ENA_CFG(port->chip_port)); + + lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_PORT_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) | + DEV_CLOCK_CFG_MAC_RX_RST_SET(1) | + DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST | + DEV_CLOCK_CFG_PORT_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + /* 12: Clear flushing */ + lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2), + QSYS_SW_PORT_MODE_AGING_MODE, + lan966x, QSYS_SW_PORT_MODE(port->chip_port)); + + /* The port is disabled and flushed, now set up the port in the + * new operating mode + */ +} + +static void lan966x_port_link_up(struct lan966x_port *port) +{ + struct lan966x_port_config *config = &port->config; + struct lan966x *lan966x = port->lan966x; + int speed = 0, mode = 0; + int atop_wm = 0; + + switch (config->speed) { + case SPEED_10: + speed = LAN966X_SPEED_10; + break; + case SPEED_100: + speed = LAN966X_SPEED_100; + break; + case SPEED_1000: + speed = LAN966X_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + break; + case SPEED_2500: + speed = LAN966X_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + break; + } + + /* Also the GIGA_MODE_ENA(1) needs to be set regardless of the + * port speed for QSGMII ports. + */ + if (config->portmode == PHY_INTERFACE_MODE_QSGMII) + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + + lan_wr(config->duplex | mode, + lan966x, DEV_MAC_MODE_CFG(port->chip_port)); + + lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 6 : 5) | + DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) | + DEV_MAC_IFG_CFG_RX_IFG2_SET(2), + DEV_MAC_IFG_CFG_TX_IFG | + DEV_MAC_IFG_CFG_RX_IFG1 | + DEV_MAC_IFG_CFG_RX_IFG2, + lan966x, DEV_MAC_IFG_CFG(port->chip_port)); + + lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) | + DEV_MAC_HDX_CFG_SEED_LOAD_SET(1), + DEV_MAC_HDX_CFG_SEED | + DEV_MAC_HDX_CFG_SEED_LOAD, + lan966x, DEV_MAC_HDX_CFG(port->chip_port)); + + if (config->portmode == PHY_INTERFACE_MODE_GMII) { + if (config->speed == SPEED_1000) + lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1), + CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, + lan966x, + CHIP_TOP_CUPHY_PORT_CFG(port->chip_port)); + else + lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0), + CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, + lan966x, + CHIP_TOP_CUPHY_PORT_CFG(port->chip_port)); + } + + /* No PFC */ + lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed), + lan966x, ANA_PFC_CFG(port->chip_port)); + + lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1), + DEV_PCS1G_CFG_PCS_ENA, + lan966x, DEV_PCS1G_CFG(port->chip_port)); + + lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0), + DEV_PCS1G_SD_CFG_SD_ENA, + lan966x, DEV_PCS1G_SD_CFG(port->chip_port)); + + /* Set Pause WM hysteresis, start/stop are in 1518 byte units */ + lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) | + SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) | + SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)), + lan966x, SYS_PAUSE_CFG(port->chip_port)); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port)); + lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port)); + + /* Flow control */ + lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) | + SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 
1 : 0) |
+		SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 1 : 0),
+		SYS_MAC_FC_CFG_FC_LINK_SPEED |
+		SYS_MAC_FC_CFG_FC_LATENCY_CFG |
+		SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+		SYS_MAC_FC_CFG_PAUSE_VAL_CFG |
+		SYS_MAC_FC_CFG_RX_FC_ENA |
+		SYS_MAC_FC_CFG_TX_FC_ENA,
+		lan966x, SYS_MAC_FC_CFG(port->chip_port));
+
+	/* Tail dropping watermark */
+	atop_wm = lan966x->shared_queue_sz;
+
+	/* The total memory size is divided by the number of front ports
+	 * plus the CPU port
+	 */
+	lan_wr(lan966x_wm_enc(atop_wm / (lan966x->num_phys_ports + 1)), lan966x,
+	       SYS_ATOP(port->chip_port));
+	lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG);
+
+	/* This needs to be at the end */
+	/* Enable MAC module */
+	lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) |
+	       DEV_MAC_ENA_CFG_TX_ENA_SET(1),
+	       lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+	/* Take out the clock from reset */
+	lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed),
+	       lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+	/* Core: Enable port for frame transfer */
+	lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+	       QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+	       QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+	       lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+	lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) |
+		AFI_PORT_CFG_FRM_OUT_MAX_SET(16),
+		AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+		AFI_PORT_CFG_FRM_OUT_MAX,
+		lan966x, AFI_PORT_CFG(port->chip_port));
+}
+
+void lan966x_port_config_down(struct lan966x_port *port)
+{
+	lan966x_port_link_down(port);
+}
+
+void lan966x_port_config_up(struct lan966x_port *port)
+{
+	lan966x_port_link_up(port);
+}
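Editor's note: the pause and tail-drop watermarks above go through the encoding implemented by lan966x_wm_enc() at the top of this file. Values are expressed in buffer cells; values of 256 cells or more switch to a coarse unit of 16 cells, flagged by bit 8. A standalone sketch (the 64-byte cell size here is an assumption standing in for LAN966X_BUFFER_CELL_SZ):

/* Illustrative userspace sketch, not part of the patch */
#include <stdio.h>

#define CELL_SZ        64		/* assumed buffer cell size */
#define MULTIPLIER_BIT (1u << 8)

static unsigned int wm_enc(unsigned int bytes)
{
	unsigned int value = bytes / CELL_SZ;

	if (value >= MULTIPLIER_BIT) {
		value /= 16;				/* unit becomes 16 cells */
		if (value >= MULTIPLIER_BIT)
			value = MULTIPLIER_BIT - 1;	/* saturate */
		value |= MULTIPLIER_BIT;		/* mark coarse encoding */
	}

	return value;
}

int main(void)
{
	/* 4 * 1518 bytes fits the fine encoding, 1 MiB needs the coarse one */
	printf("0x%x 0x%x\n", wm_enc(4 * 1518), wm_enc(1 << 20));
	return 0;
}

+
+void lan966x_port_status_get(struct lan966x_port *port,
+			     struct phylink_link_state *state)
+{
+	struct lan966x *lan966x = port->lan966x;
+	bool link_down;
+	u16 bmsr = 0;
+	u16 lp_adv;
+	u32 val;
+
+	val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port));
+	link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val);
+	if (link_down)
+		lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port));
+
+	/* Get both current Link and Sync status */
+	val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port));
+	state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) &&
+		      DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val);
+	state->link &= !link_down;
+
+	/* Get PCS ANEG status register */
+	val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port));
+	/* Aneg complete provides more information */
+	if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) {
+		state->an_complete = true;
+
+		bmsr |= state->link ?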
BMSR_LSTATUS : 0; + bmsr |= BMSR_ANEGCOMPLETE; + + lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val); + phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv); + } else { + if (!state->link) + return; + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + state->speed = SPEED_1000; + else if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + + state->duplex = DUPLEX_FULL; + } +} + +int lan966x_port_pcs_set(struct lan966x_port *port, + struct lan966x_port_config *config) +{ + struct lan966x *lan966x = port->lan966x; + bool inband_aneg = false; + bool outband; + + if (config->inband) { + if (config->portmode == PHY_INTERFACE_MODE_SGMII || + config->portmode == PHY_INTERFACE_MODE_QSGMII) + inband_aneg = true; /* Cisco-SGMII in-band-aneg */ + else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX && + config->autoneg) + inband_aneg = true; /* Clause-37 in-band-aneg */ + + outband = false; + } else { + outband = true; + } + + /* Disable or enable inband */ + lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband), + DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, + lan966x, DEV_PCS1G_MODE_CFG(port->chip_port)); + + /* Enable PCS */ + lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1), + lan966x, DEV_PCS1G_CFG(port->chip_port)); + + if (inband_aneg) { + int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode, + config->advertising); + if (adv >= 0) + /* Enable in-band aneg */ + lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) | + DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) | + DEV_PCS1G_ANEG_CFG_ENA_SET(1) | + DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1), + lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port)); + } else { + lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port)); + } + + /* Take PCS out of reset */ + lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) | + DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_LINK_SPEED | + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan966x, DEV_CLOCK_CFG(port->chip_port)); + + port->config = *config; + + return 0; +} + +void lan966x_port_init(struct lan966x_port *port) +{ + struct lan966x_port_config *config = &port->config; + struct lan966x *lan966x = port->lan966x; + + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0), + ANA_PORT_CFG_LEARN_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + + lan966x_port_config_down(port); + + if (config->portmode != PHY_INTERFACE_MODE_QSGMII) + return; + + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0) | + DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST | + DEV_CLOCK_CFG_LINK_SPEED, + lan966x, DEV_CLOCK_CFG(port->chip_port)); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h new file mode 100644 index 000000000000..797560172aca --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -0,0 +1,871 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ + +/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200. + * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty) + */ + +#ifndef _LAN966X_REGS_H_ +#define _LAN966X_REGS_H_ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/bug.h> + +enum lan966x_target { + TARGET_AFI = 2, + TARGET_ANA = 3, + TARGET_CHIP_TOP = 5, + TARGET_CPU = 6, + TARGET_DEV = 13, + TARGET_GCB = 27, + TARGET_ORG = 36, + TARGET_QS = 42, + TARGET_QSYS = 46, + TARGET_REW = 47, + TARGET_SYS = 52, + NUM_TARGETS = 66 +}; + +#define __REG(...) 
__VA_ARGS__ + +/* AFI:PORT_TBL:PORT_FRM_OUT */ +#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4) + +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\ + FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\ + FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) + +/* AFI:PORT_TBL:PORT_CFG */ +#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4) + +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) + +#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0) +#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x) +#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x) + +/* ANA:ANA:ADVLEARN */ +#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4) + +#define ANA_ADVLEARN_VLAN_CHK BIT(0) +#define ANA_ADVLEARN_VLAN_CHK_SET(x)\ + FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x) +#define ANA_ADVLEARN_VLAN_CHK_GET(x)\ + FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x) + +/* ANA:ANA:VLANMASK */ +#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4) + +/* ANA:ANA:ANAINTR */ +#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4) + +#define ANA_ANAINTR_INTR BIT(1) +#define ANA_ANAINTR_INTR_SET(x)\ + FIELD_PREP(ANA_ANAINTR_INTR, x) +#define ANA_ANAINTR_INTR_GET(x)\ + FIELD_GET(ANA_ANAINTR_INTR, x) + +#define ANA_ANAINTR_INTR_ENA BIT(0) +#define ANA_ANAINTR_INTR_ENA_SET(x)\ + FIELD_PREP(ANA_ANAINTR_INTR_ENA, x) +#define ANA_ANAINTR_INTR_ENA_GET(x)\ + FIELD_GET(ANA_ANAINTR_INTR_ENA, x) + +/* ANA:ANA:AUTOAGE */ +#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4) + +#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1) +#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\ + FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x) +#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\ + FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x) + +/* ANA:ANA:FLOODING */ +#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4) + +#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12) +#define ANA_FLOODING_FLD_UNICAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x) +#define ANA_FLOODING_FLD_UNICAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_UNICAST, x) + +#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6) +#define ANA_FLOODING_FLD_BROADCAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x) +#define ANA_FLOODING_FLD_BROADCAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x) + +#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0) +#define ANA_FLOODING_FLD_MULTICAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x) +#define ANA_FLOODING_FLD_MULTICAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x) + +/* ANA:ANA:FLOODING_IPMC */ +#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4) + +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6) +#define 
ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) + +/* ANA:PGID:PGID */ +#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4) + +#define ANA_PGID_PGID GENMASK(8, 0) +#define ANA_PGID_PGID_SET(x)\ + FIELD_PREP(ANA_PGID_PGID, x) +#define ANA_PGID_PGID_GET(x)\ + FIELD_GET(ANA_PGID_PGID, x) + +/* ANA:PGID:PGID_CFG */ +#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4) + +#define ANA_PGID_CFG_OBEY_VLAN BIT(0) +#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\ + FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x) +#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\ + FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x) + +/* ANA:ANA_TABLES:MACHDATA */ +#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4) + +/* ANA:ANA_TABLES:MACLDATA */ +#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4) + +/* ANA:ANA_TABLES:MACACCESS */ +#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4) + +#define ANA_MACACCESS_CHANGE2SW BIT(17) +#define ANA_MACACCESS_CHANGE2SW_SET(x)\ + FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x) +#define ANA_MACACCESS_CHANGE2SW_GET(x)\ + FIELD_GET(ANA_MACACCESS_CHANGE2SW, x) + +#define ANA_MACACCESS_MAC_CPU_COPY BIT(16) +#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x) +#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x) + +#define ANA_MACACCESS_VALID BIT(12) +#define ANA_MACACCESS_VALID_SET(x)\ + FIELD_PREP(ANA_MACACCESS_VALID, x) +#define ANA_MACACCESS_VALID_GET(x)\ + FIELD_GET(ANA_MACACCESS_VALID, x) + +#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10) +#define ANA_MACACCESS_ENTRYTYPE_SET(x)\ + FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x) +#define ANA_MACACCESS_ENTRYTYPE_GET(x)\ + FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x) + +#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4) +#define ANA_MACACCESS_DEST_IDX_SET(x)\ + FIELD_PREP(ANA_MACACCESS_DEST_IDX, x) +#define ANA_MACACCESS_DEST_IDX_GET(x)\ + FIELD_GET(ANA_MACACCESS_DEST_IDX, x) + +#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0) +#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x) +#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x) + +/* ANA:ANA_TABLES:MACTINDX */ +#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4) + +#define ANA_MACTINDX_BUCKET GENMASK(12, 11) +#define ANA_MACTINDX_BUCKET_SET(x)\ + FIELD_PREP(ANA_MACTINDX_BUCKET, x) +#define ANA_MACTINDX_BUCKET_GET(x)\ + FIELD_GET(ANA_MACTINDX_BUCKET, x) + +#define ANA_MACTINDX_M_INDEX GENMASK(10, 0) +#define ANA_MACTINDX_M_INDEX_SET(x)\ + FIELD_PREP(ANA_MACTINDX_M_INDEX, x) +#define ANA_MACTINDX_M_INDEX_GET(x)\ + FIELD_GET(ANA_MACTINDX_M_INDEX, x) + +/* ANA:ANA_TABLES:VLAN_PORT_MASK */ +#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4) + +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\ + FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\ + FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) + +/* ANA:ANA_TABLES:VLANACCESS */ +#define ANA_VLANACCESS __REG(TARGET_ANA, 
0, 1, 27520, 0, 1, 128, 60, 0, 1, 4) + +#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0) +#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\ + FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x) +#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\ + FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x) + +/* ANA:ANA_TABLES:VLANTIDX */ +#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4) + +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) + +#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0) +#define ANA_VLANTIDX_V_INDEX_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_V_INDEX, x) +#define ANA_VLANTIDX_V_INDEX_GET(x)\ + FIELD_GET(ANA_VLANTIDX_V_INDEX, x) + +/* ANA:PORT:VLAN_CFG */ +#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4) + +#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) + +#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18) +#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x) +#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x) + +#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0) +#define ANA_VLAN_CFG_VLAN_VID_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x) +#define ANA_VLAN_CFG_VLAN_VID_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x) + +/* ANA:PORT:DROP_CFG */ +#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4) + +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) + +/* ANA:PORT:CPU_FWD_CFG */ +#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4) + +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) + +/* ANA:PORT:CPU_FWD_BPDU_CFG */ +#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4) + +/* ANA:PORT:PORT_CFG */ +#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4) + +#define ANA_PORT_CFG_LEARNAUTO BIT(6) +#define ANA_PORT_CFG_LEARNAUTO_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x) +#define ANA_PORT_CFG_LEARNAUTO_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x) + +#define ANA_PORT_CFG_LEARN_ENA BIT(5) +#define ANA_PORT_CFG_LEARN_ENA_SET(x)\ + 
FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x) +#define ANA_PORT_CFG_LEARN_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x) + +#define ANA_PORT_CFG_RECV_ENA BIT(4) +#define ANA_PORT_CFG_RECV_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x) +#define ANA_PORT_CFG_RECV_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_RECV_ENA, x) + +#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0) +#define ANA_PORT_CFG_PORTID_VAL_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x) +#define ANA_PORT_CFG_PORTID_VAL_GET(x)\ + FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x) + +/* ANA:PFC:PFC_CFG */ +#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4) + +#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0) +#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x) +#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x) + +/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */ +#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4) + +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0) +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) + +/* DEV:PORT_MODE:CLOCK_CFG */ +#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4) + +#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) +#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x) +#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x) + +#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) +#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x) +#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x) + +#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) +#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x) +#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x) + +#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4) +#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x) +#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x) + +#define DEV_CLOCK_CFG_PORT_RST BIT(3) +#define DEV_CLOCK_CFG_PORT_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x) +#define DEV_CLOCK_CFG_PORT_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x) + +#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0) +#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x) +#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x) + +/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4) + +#define DEV_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x) +#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x) + +#define DEV_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x) +#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */ +#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4) + +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\ + FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define 
DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4) + +#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4) + +#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8) +#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x) +#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x) + +#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4) +#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x) +#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x) + +#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0) +#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x) +#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x) + +/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */ +#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4) + +#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16) +#define DEV_MAC_HDX_CFG_SEED_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x) +#define DEV_MAC_HDX_CFG_SEED_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED, x) + +#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x) +#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */ +#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */ +#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */ +#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4) + +#define DEV_PCS1G_CFG_PCS_ENA BIT(0) +#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x) +#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */ +#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4) + +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) +#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ +#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4) + +#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0) +#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x) +#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */ +#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4) + +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16) +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x) +#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x) + +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8) +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) +#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x) + +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1) 
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x) +#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x) + +#define DEV_PCS1G_ANEG_CFG_ENA BIT(0) +#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x) +#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */ +#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4) + +#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16) +#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x) +#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x) + +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\ + FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) +#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\ + FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */ +#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4) + +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4) +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\ + FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x) +#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\ + FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x) + +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\ + FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x) +#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\ + FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */ +#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4) + +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\ + FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) +#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\ + FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) + +/* DEVCPU_QS:XTR:XTR_GRP_CFG */ +#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4) + +#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_MODE, x) +#define QS_XTR_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_MODE, x) + +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x) +#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:XTR:XTR_RD */ +#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4) + +/* DEVCPU_QS:XTR:XTR_FLUSH */ +#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) + +/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */ +#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4) + +/* DEVCPU_QS:INJ:INJ_GRP_CFG */ +#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4) + +#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_MODE, x) +#define QS_INJ_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_MODE, x) + +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x) +#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:INJ:INJ_WR */ +#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4) + +/* DEVCPU_QS:INJ:INJ_CTRL */ 
+#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4) + +#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x) +#define QS_INJ_CTRL_GAP_SIZE_GET(x)\ + FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x) + +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_EOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_EOF, x) +#define QS_INJ_CTRL_EOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_EOF, x) + +#define QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_SOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_SOF, x) +#define QS_INJ_CTRL_SOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_SOF, x) + +#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x) +#define QS_INJ_CTRL_VLD_BYTES_GET(x)\ + FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x) + +/* DEVCPU_QS:INJ:INJ_STATUS */ +#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4) + +#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x) +#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\ + FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x) + +#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_SET(x)\ + FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x) +#define QS_INJ_STATUS_FIFO_RDY_GET(x)\ + FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x) + +/* QSYS:SYSTEM:PORT_MODE */ +#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4) + +#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) +#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\ + FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x) +#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\ + FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x) + +/* QSYS:SYSTEM:SWITCH_PORT_MODE */ +#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4) + +#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18) +#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x) +#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x) + +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) + +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) + +#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) + +#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0) +#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x) +#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x) + +/* QSYS:SYSTEM:SW_STATUS */ +#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4) + +#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0) +#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\ + FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x) +#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\ + FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x) + +/* QSYS:SYSTEM:CPU_GROUP_MAP */ +#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4) + +/* QSYS:RES_CTRL:RES_CFG */ +#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 
32768, g, 1024, 8, 0, 0, 1, 4) + +/* REW:PORT:PORT_VLAN_CFG */ +#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4) + +#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16) +#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x) +#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x) + +#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0) +#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x) +#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) + +/* REW:PORT:TAG_CFG */ +#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 4, 0, 1, 4) + +#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7) +#define REW_TAG_CFG_TAG_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_CFG, x) +#define REW_TAG_CFG_TAG_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_CFG, x) + +#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5) +#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x) +#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x) + +/* REW:PORT:PORT_CFG */ +#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4) + +#define REW_PORT_CFG_NO_REWRITE BIT(0) +#define REW_PORT_CFG_NO_REWRITE_SET(x)\ + FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x) +#define REW_PORT_CFG_NO_REWRITE_GET(x)\ + FIELD_GET(REW_PORT_CFG_NO_REWRITE, x) + +/* SYS:SYSTEM:RESET_CFG */ +#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4) + +#define SYS_RESET_CFG_CORE_ENA BIT(0) +#define SYS_RESET_CFG_CORE_ENA_SET(x)\ + FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x) +#define SYS_RESET_CFG_CORE_ENA_GET(x)\ + FIELD_GET(SYS_RESET_CFG_CORE_ENA, x) + +/* SYS:SYSTEM:PORT_MODE */ +#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4) + +#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4) +#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x) +#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x) + +#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2) +#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x) +#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x) + +/* SYS:SYSTEM:FRONT_PORT_MODE */ +#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4) + +#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1) +#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\ + FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x) +#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\ + FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x) + +/* SYS:SYSTEM:FRM_AGING */ +#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4) + +#define SYS_FRM_AGING_AGE_TX_ENA BIT(20) +#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\ + FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x) +#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\ + FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x) + +/* SYS:SYSTEM:STAT_CFG */ +#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4) + +#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0) +#define SYS_STAT_CFG_STAT_VIEW_SET(x)\ + FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x) +#define SYS_STAT_CFG_STAT_VIEW_GET(x)\ + FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x) + +/* SYS:PAUSE_CFG:PAUSE_CFG */ +#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4) + +#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10) +#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x) +#define 
SYS_PAUSE_CFG_PAUSE_START_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x) + +#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1) +#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x) +#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x) + +#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) +#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x) +#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x) + +/* SYS:PAUSE_CFG:ATOP */ +#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4) + +/* SYS:PAUSE_CFG:ATOP_TOT_CFG */ +#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 76, 0, 1, 4) + +/* SYS:PAUSE_CFG:MAC_FC_CFG */ +#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4) + +#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) + +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) + +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) + +#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17) +#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x) +#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16) +#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x) +#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) + +/* SYS:STAT:CNT */ +#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4) + +/* SYS:RAM_CTRL:RAM_INIT */ +#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4) + +#define SYS_RAM_INIT_RAM_INIT BIT(1) +#define SYS_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x) +#define SYS_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(SYS_RAM_INIT_RAM_INIT, x) + +#endif /* _LAN966X_REGS_H_ */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c new file mode 100644 index 000000000000..7de55f6a4da8 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/if_bridge.h> +#include <net/switchdev.h> + +#include "lan966x_main.h" + +static struct notifier_block lan966x_netdevice_nb __read_mostly; +static struct notifier_block lan966x_switchdev_nb __read_mostly; +static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly; + +static void lan966x_port_set_mcast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), 
+ ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_MC)); +} + +static void lan966x_port_set_ucast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_UC)); +} + +static void lan966x_port_set_bcast_flood(struct lan966x_port *port, + bool enabled) +{ + u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC)); + + val = ANA_PGID_PGID_GET(val); + if (enabled) + val |= BIT(port->chip_port); + else + val &= ~BIT(port->chip_port); + + lan_rmw(ANA_PGID_PGID_SET(val), + ANA_PGID_PGID, + port->lan966x, ANA_PGID(PGID_BC)); +} + +static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled) +{ + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled), + ANA_PORT_CFG_LEARN_ENA, + port->lan966x, ANA_PORT_CFG(port->chip_port)); + + port->learn_ena = enabled; +} + +static void lan966x_port_bridge_flags(struct lan966x_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & BR_MCAST_FLOOD) + lan966x_port_set_mcast_flood(port, + !!(flags.val & BR_MCAST_FLOOD)); + + if (flags.mask & BR_FLOOD) + lan966x_port_set_ucast_flood(port, + !!(flags.val & BR_FLOOD)); + + if (flags.mask & BR_BCAST_FLOOD) + lan966x_port_set_bcast_flood(port, + !!(flags.val & BR_BCAST_FLOOD)); + + if (flags.mask & BR_LEARNING) + lan966x_port_set_learning(port, + !!(flags.val & BR_LEARNING)); +} + +static int lan966x_port_pre_bridge_flags(struct lan966x_port *port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD | + BR_LEARNING)) + return -EINVAL; + + return 0; +} + +static void lan966x_update_fwd_mask(struct lan966x *lan966x) +{ + int i; + + for (i = 0; i < lan966x->num_phys_ports; i++) { + struct lan966x_port *port = lan966x->ports[i]; + unsigned long mask = 0; + + if (port && lan966x->bridge_fwd_mask & BIT(i)) + mask = lan966x->bridge_fwd_mask & ~BIT(i); + + mask |= BIT(CPU_PORT); + + lan_wr(ANA_PGID_PGID_SET(mask), + lan966x, ANA_PGID(PGID_SRC + i)); + } +} + +static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state) +{ + struct lan966x *lan966x = port->lan966x; + bool learn_ena = false; + + if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) && + port->learn_ena) + learn_ena = true; + + if (state == BR_STATE_FORWARDING) + lan966x->bridge_fwd_mask |= BIT(port->chip_port); + else + lan966x->bridge_fwd_mask &= ~BIT(port->chip_port); + + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena), + ANA_PORT_CFG_LEARN_ENA, + lan966x, ANA_PORT_CFG(port->chip_port)); + + lan966x_update_fwd_mask(lan966x); +} + +static void lan966x_port_ageing_set(struct lan966x_port *port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; + + lan966x_mac_set_ageing(port->lan966x, ageing_time); +} + +static int lan966x_port_attr_set(struct net_device *dev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct lan966x_port *port = netdev_priv(dev); + int err = 0; + + if (ctx && ctx != port) + return 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + lan966x_port_bridge_flags(port, attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags); + break; + case 
SWITCHDEV_ATTR_ID_PORT_STP_STATE: + lan966x_port_stp_state_set(port, attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + lan966x_port_ageing_set(port, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering); + lan966x_vlan_port_apply(port); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_port_bridge_join(struct lan966x_port *port, + struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct switchdev_brport_flags flags = {0}; + struct lan966x *lan966x = port->lan966x; + struct net_device *dev = port->dev; + int err; + + if (!lan966x->bridge_mask) { + lan966x->bridge = bridge; + } else { + if (lan966x->bridge != bridge) { + NL_SET_ERR_MSG_MOD(extack, "Adding port to a different bridge is not allowed"); + return -ENODEV; + } + } + + err = switchdev_bridge_port_offload(dev, dev, port, + &lan966x_switchdev_nb, + &lan966x_switchdev_blocking_nb, + false, extack); + if (err) + return err; + + lan966x->bridge_mask |= BIT(port->chip_port); + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask; + lan966x_port_bridge_flags(port, flags); + + return 0; +} + +static void lan966x_port_bridge_leave(struct lan966x_port *port, + struct net_device *bridge) +{ + struct switchdev_brport_flags flags = {0}; + struct lan966x *lan966x = port->lan966x; + + flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD; + flags.val = flags.mask & ~BR_LEARNING; + lan966x_port_bridge_flags(port, flags); + + lan966x->bridge_mask &= ~BIT(port->chip_port); + + if (!lan966x->bridge_mask) + lan966x->bridge = NULL; + + /* Set the port back to host mode */ + lan966x_vlan_port_set_vlan_aware(port, false); + lan966x_vlan_port_set_vid(port, HOST_PVID, false, false); + lan966x_vlan_port_apply(port); +} + +static int lan966x_port_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + struct netlink_ext_ack *extack; + int err = 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + err = lan966x_port_bridge_join(port, info->upper_dev, + extack); + else + lan966x_port_bridge_leave(port, info->upper_dev); + } + + return err; +} + +static int lan966x_port_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct lan966x_port *port = netdev_priv(dev); + + if (netif_is_bridge_master(info->upper_dev) && !info->linking) + switchdev_bridge_port_unoffload(port->dev, port, + &lan966x_switchdev_nb, + &lan966x_switchdev_blocking_nb); + + return NOTIFY_DONE; +} + +static int lan966x_foreign_bridging_check(struct net_device *bridge, + struct netlink_ext_ack *extack) +{ + struct lan966x *lan966x = NULL; + bool has_foreign = false; + struct net_device *dev; + struct list_head *iter; + + if (!netif_is_bridge_master(bridge)) + return 0; + + netdev_for_each_lower_dev(bridge, dev, iter) { + if (lan966x_netdevice_check(dev)) { + struct lan966x_port *port = netdev_priv(dev); + + if (lan966x) { + /* Bridge already has at least one port of a + * lan966x switch inside it, check that it's + * the same instance of the driver.
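+ * Ports from two different lan966x instances
+ * cannot hardware-forward to each other, so a
+ * bridge spanning both devices cannot be
+ * offloaded and is rejected below.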
+ */ + if (port->lan966x != lan966x) { + NL_SET_ERR_MSG_MOD(extack, + "Bridging between multiple lan966x switches disallowed"); + return -EINVAL; + } + } else { + /* This is the first lan966x port inside this + * bridge + */ + lan966x = port->lan966x; + } + } else { + has_foreign = true; + } + + if (lan966x && has_foreign) { + NL_SET_ERR_MSG_MOD(extack, + "Bridging lan966x ports with foreign interfaces disallowed"); + return -EINVAL; + } + } + + return 0; +} + +static int lan966x_bridge_check(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + return lan966x_foreign_bridging_check(info->upper_dev, + info->info.extack); +} + +static int lan966x_netdevice_port_event(struct net_device *dev, + struct notifier_block *nb, + unsigned long event, void *ptr) +{ + int err = 0; + + if (!lan966x_netdevice_check(dev)) { + if (event == NETDEV_CHANGEUPPER) + return lan966x_bridge_check(dev, ptr); + return 0; + } + + switch (event) { + case NETDEV_PRECHANGEUPPER: + err = lan966x_port_prechangeupper(dev, ptr); + break; + case NETDEV_CHANGEUPPER: + err = lan966x_bridge_check(dev, ptr); + if (err) + return err; + + err = lan966x_port_changeupper(dev, ptr); + break; + } + + return err; +} + +static int lan966x_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int ret; + + ret = lan966x_netdevice_port_event(dev, nb, event, ptr); + + return notifier_from_errno(ret); +} + +static bool lan966x_foreign_dev_check(const struct net_device *dev, + const struct net_device *foreign_dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + + if (netif_is_bridge_master(foreign_dev)) + if (lan966x->bridge != foreign_dev) + return true; + + return false; +} + +static int lan966x_switchdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + lan966x_netdevice_check, + lan966x_port_attr_set); + return notifier_from_errno(err); + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + err = switchdev_handle_fdb_event_to_device(dev, event, ptr, + lan966x_netdevice_check, + lan966x_foreign_dev_check, + lan966x_handle_fdb, + NULL); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static int lan966x_handle_port_vlan_add(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct lan966x *lan966x = port->lan966x; + + /* When adding a port to a vlan, we get a callback for the port but + * also for the bridge. When we get the callback for the bridge, just + * bail out. Then, when the bridge itself is added to the vlan, we get + * another callback here, but this time with the + * BRIDGE_VLAN_INFO_BRENTRY flag set. That means the CPU port is added + * to the vlan, so broadcast frames and unicast frames with the dmac of + * the bridge should be forwarded to the CPU.
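+ * In short: the callback for the port netdev ends up in
+ * lan966x_vlan_port_add_vlan() below, while the callback for the
+ * bridge netdev with BRIDGE_VLAN_INFO_BRENTRY set ends up in
+ * lan966x_vlan_cpu_add_vlan().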
+ */ + if (netif_is_bridge_master(obj->orig_dev) && + !(v->flags & BRIDGE_VLAN_INFO_BRENTRY)) + return 0; + + if (!netif_is_bridge_master(obj->orig_dev)) + lan966x_vlan_port_add_vlan(port, v->vid, + v->flags & BRIDGE_VLAN_INFO_PVID, + v->flags & BRIDGE_VLAN_INFO_UNTAGGED); + else + lan966x_vlan_cpu_add_vlan(lan966x, v->vid); + + return 0; +} + +static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct lan966x_port *port = netdev_priv(dev); + int err; + + if (ctx && ctx != port) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = lan966x_handle_port_vlan_add(port, obj); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = lan966x_handle_port_mdb_add(port, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_handle_port_vlan_del(struct lan966x_port *port, + const struct switchdev_obj *obj) +{ + const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct lan966x *lan966x = port->lan966x; + + if (!netif_is_bridge_master(obj->orig_dev)) + lan966x_vlan_port_del_vlan(port, v->vid); + else + lan966x_vlan_cpu_del_vlan(lan966x, v->vid); + + return 0; +} + +static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct lan966x_port *port = netdev_priv(dev); + int err; + + if (ctx && ctx != port) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = lan966x_handle_port_vlan_del(port, obj); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = lan966x_handle_port_mdb_del(port, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int lan966x_switchdev_blocking_event(struct notifier_block *nb, + unsigned long event, + void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + lan966x_netdevice_check, + lan966x_handle_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + lan966x_netdevice_check, + lan966x_handle_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + lan966x_netdevice_check, + lan966x_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static struct notifier_block lan966x_netdevice_nb __read_mostly = { + .notifier_call = lan966x_netdevice_event, +}; + +static struct notifier_block lan966x_switchdev_nb __read_mostly = { + .notifier_call = lan966x_switchdev_event, +}; + +static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = { + .notifier_call = lan966x_switchdev_blocking_event, +}; + +void lan966x_register_notifier_blocks(void) +{ + register_netdevice_notifier(&lan966x_netdevice_nb); + register_switchdev_notifier(&lan966x_switchdev_nb); + register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb); +} + +void lan966x_unregister_notifier_blocks(void) +{ + unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb); + unregister_switchdev_notifier(&lan966x_switchdev_nb); + unregister_netdevice_notifier(&lan966x_netdevice_nb); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c new file 
mode 100644 index 000000000000..8d7260cd7da9 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +#define VLANACCESS_CMD_IDLE 0 +#define VLANACCESS_CMD_READ 1 +#define VLANACCESS_CMD_WRITE 2 +#define VLANACCESS_CMD_INIT 3 + +static int lan966x_vlan_get_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, ANA_VLANACCESS); +} + +static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x) +{ + u32 val; + + return readx_poll_timeout(lan966x_vlan_get_status, + lan966x, val, + (val & ANA_VLANACCESS_VLAN_TBL_CMD) == + VLANACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); +} + +static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid) +{ + u16 mask = lan966x->vlan_mask[vid]; + bool cpu_dis; + + cpu_dis = !(mask & BIT(CPU_PORT)); + + /* Set flags and the VID to configure */ + lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) | + ANA_VLANTIDX_V_INDEX_SET(vid), + ANA_VLANTIDX_VLAN_PGID_CPU_DIS | + ANA_VLANTIDX_V_INDEX, + lan966x, ANA_VLANTIDX); + + /* Set the vlan port members mask */ + lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask), + ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, + lan966x, ANA_VLAN_PORT_MASK); + + /* Issue a write command */ + lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE), + ANA_VLANACCESS_VLAN_TBL_CMD, + lan966x, ANA_VLANACCESS); + + if (lan966x_vlan_wait_for_completion(lan966x)) + dev_err(lan966x->dev, "Vlan set mask failed\n"); +} + +static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid) +{ + struct lan966x *lan966x = port->lan966x; + u8 p = port->chip_port; + + lan966x->vlan_mask[vid] |= BIT(p); + lan966x_vlan_set_mask(lan966x, vid); +} + +static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid) +{ + struct lan966x *lan966x = port->lan966x; + u8 p = port->chip_port; + + lan966x->vlan_mask[vid] &= ~BIT(p); + lan966x_vlan_set_mask(lan966x, vid); +} + +static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT)); +} + +static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + lan966x->vlan_mask[vid] |= BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, vid); +} + +static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, vid); +} + +static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + __set_bit(vid, lan966x->cpu_vlan_mask); +} + +static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + __clear_bit(vid, lan966x->cpu_vlan_mask); +} + +bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid) +{ + return test_bit(vid, lan966x->cpu_vlan_mask); +} + +static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + + if (!(lan966x->bridge_mask & BIT(port->chip_port))) + return HOST_PVID; + + return port->vlan_aware ? 
port->pvid : UNAWARE_PVID; +} + +int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid, + bool pvid, bool untagged) +{ + struct lan966x *lan966x = port->lan966x; + + /* Egress vlan classification */ + if (untagged && port->vid != vid) { + if (port->vid) { + dev_err(lan966x->dev, + "Port already has a native VLAN: %d\n", + port->vid); + return -EBUSY; + } + port->vid = vid; + } + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + return 0; +} + +static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid) +{ + if (port->pvid == vid) + port->pvid = 0; + + if (port->vid == vid) + port->vid = 0; +} + +void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port, + bool vlan_aware) +{ + port->vlan_aware = vlan_aware; +} + +void lan966x_vlan_port_apply(struct lan966x_port *port) +{ + struct lan966x *lan966x = port->lan966x; + u16 pvid; + u32 val; + + pvid = lan966x_vlan_port_get_pvid(port); + + /* Ingress classification (ANA_PORT_VLAN_CFG) */ + /* Default vlan to classify for untagged frames (may be zero) */ + val = ANA_VLAN_CFG_VLAN_VID_SET(pvid); + if (port->vlan_aware) + val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) | + ANA_VLAN_CFG_VLAN_POP_CNT_SET(1); + + lan_rmw(val, + ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA | + ANA_VLAN_CFG_VLAN_POP_CNT, + lan966x, ANA_VLAN_CFG(port->chip_port)); + + /* Drop frames with multicast source address */ + val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1); + if (port->vlan_aware && !pvid) + /* If port is vlan-aware and tagged, drop untagged and priority + * tagged frames. + */ + val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) | + ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) | + ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1); + + lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port)); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */ + val = REW_TAG_CFG_TAG_TPID_CFG_SET(0); + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CFG_TAG_CFG_SET(1); + else + val |= REW_TAG_CFG_TAG_CFG_SET(3); + } + + /* Update only some bits in the register */ + lan_rmw(val, + REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG, + lan966x, REW_TAG_CFG(port->chip_port)); + + /* Set default VLAN and tag type to 8021Q */ + lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) | + REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid), + REW_PORT_VLAN_CFG_PORT_TPID | + REW_PORT_VLAN_CFG_PORT_VID, + lan966x, REW_PORT_VLAN_CFG(port->chip_port)); +} + +void lan966x_vlan_port_add_vlan(struct lan966x_port *port, + u16 vid, + bool pvid, + bool untagged) +{ + struct lan966x *lan966x = port->lan966x; + + /* If the CPU(br) is already part of the vlan then add the fdb + * entries in MAC table to copy the frames to the CPU(br). + * If the CPU(br) is not part of the vlan, it will just + * drop the frames.
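+ * Whether the CPU(br) is part of a vlan is tracked in cpu_vlan_mask,
+ * which is updated by lan966x_vlan_cpu_add_vlan() and
+ * lan966x_vlan_cpu_del_vlan() below.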
+ */ + if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) { + lan966x_vlan_cpu_add_vlan_mask(lan966x, vid); + lan966x_fdb_write_entries(lan966x, vid); + lan966x_mdb_write_entries(lan966x, vid); + } + + lan966x_vlan_port_set_vid(port, vid, pvid, untagged); + lan966x_vlan_port_add_vlan_mask(port, vid); + lan966x_vlan_port_apply(port); +} + +void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid) +{ + struct lan966x *lan966x = port->lan966x; + + lan966x_vlan_port_remove_vid(port, vid); + lan966x_vlan_port_del_vlan_mask(port, vid); + lan966x_vlan_port_apply(port); + + /* In case there are no other ports in the vlan, remove the CPU from + * that vlan but still keep it in the mask, because it may be needed + * again when another port gets added to that vlan + */ + if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) { + lan966x_vlan_cpu_del_vlan_mask(lan966x, vid); + lan966x_fdb_erase_entries(lan966x, vid); + lan966x_mdb_erase_entries(lan966x, vid); + } +} + +void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid) +{ + /* Add an entry in the MAC table for the CPU. + * Add the CPU part of the vlan only if there is another port in that + * vlan; otherwise all the broadcast frames in that vlan would go to + * the CPU even though none of the ports are members, and the CPU would + * just have to discard these frames. The information still needs to be + * stored, so that when a front port is added to the vlan, the CPU port + * is added as well. + */ + if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) { + lan966x_vlan_cpu_add_vlan_mask(lan966x, vid); + lan966x_mdb_write_entries(lan966x, vid); + } + + lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid); + lan966x_fdb_write_entries(lan966x, vid); +} + +void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid) +{ + /* Remove the CPU part of the vlan */ + lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid); + lan966x_vlan_cpu_del_vlan_mask(lan966x, vid); + lan966x_fdb_erase_entries(lan966x, vid); + lan966x_mdb_erase_entries(lan966x, vid); +} + +void lan966x_vlan_init(struct lan966x *lan966x) +{ + u16 port, vid; + + /* Clear VLAN table, by default all ports are members of all VLANS */ + lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT), + ANA_VLANACCESS_VLAN_TBL_CMD, + lan966x, ANA_VLANACCESS); + lan966x_vlan_wait_for_completion(lan966x); + + for (vid = 1; vid < VLAN_N_VID; vid++) { + lan966x->vlan_mask[vid] = 0; + lan966x_vlan_set_mask(lan966x, vid); + } + + /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */ + lan966x->vlan_mask[HOST_PVID] = + GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, HOST_PVID); + + lan966x->vlan_mask[UNAWARE_PVID] = + GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT); + lan966x_vlan_set_mask(lan966x, UNAWARE_PVID); + + lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID); + + /* Configure the CPU port to be vlan aware */ + lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) | + ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) | + ANA_VLAN_CFG_VLAN_POP_CNT_SET(1), + lan966x, ANA_VLAN_CFG(CPU_PORT)); + + /* Set vlan ingress filter mask to all ports */ + lan_wr(GENMASK(lan966x->num_phys_ports, 0), + lan966x, ANA_VLANMASK); + + for (port = 0; port < lan966x->num_phys_ports; port++) { + lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port)); + lan_wr(0, lan966x, REW_TAG_CFG(port)); + } +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 4625d4fb4cde..16266275dd36 100644 ---
a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -292,6 +292,33 @@ static int sparx5_create_port(struct sparx5 *sparx5, spx5_port->phylink_config.dev = &spx5_port->ndev->dev; spx5_port->phylink_config.type = PHYLINK_NETDEV; spx5_port->phylink_config.pcs_poll = true; + spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | + MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | + MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD; + + __set_bit(PHY_INTERFACE_MODE_SGMII, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_QSGMII, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + spx5_port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_5000 || + spx5_port->conf.bandwidth == SPEED_10000 || + spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_5GBASER, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_10000 || + spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_10GBASER, + spx5_port->phylink_config.supported_interfaces); + + if (spx5_port->conf.bandwidth == SPEED_25000) + __set_bit(PHY_INTERFACE_MODE_25GBASER, + spx5_port->phylink_config.supported_interfaces); phylink = phylink_create(&spx5_port->phylink_config, of_fwnode_handle(config->node), diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c index fb74752de0ca..8ba33bc1a001 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c @@ -26,79 +26,6 @@ static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_po return false; } -static void sparx5_phylink_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct sparx5_port *port = netdev_priv(to_net_dev(config->dev)); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - - switch (state->interface) { - case PHY_INTERFACE_MODE_5GBASER: - case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_25GBASER: - case PHY_INTERFACE_MODE_NA: - if (port->conf.bandwidth == SPEED_5000) - phylink_set(mask, 5000baseT_Full); - if (port->conf.bandwidth == SPEED_10000) { - phylink_set(mask, 5000baseT_Full); - phylink_set(mask, 10000baseT_Full); - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); - } - if (port->conf.bandwidth == SPEED_25000) { - phylink_set(mask, 5000baseT_Full); - phylink_set(mask, 10000baseT_Full); - phylink_set(mask, 10000baseCR_Full); - phylink_set(mask, 10000baseSR_Full); - phylink_set(mask, 10000baseLR_Full); - phylink_set(mask, 10000baseLRM_Full); - phylink_set(mask, 10000baseER_Full); - phylink_set(mask, 25000baseCR_Full); - phylink_set(mask, 25000baseSR_Full); - } - if (state->interface != PHY_INTERFACE_MODE_NA) - break; - fallthrough; - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - 
phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - if (state->interface != PHY_INTERFACE_MODE_NA) - break; - fallthrough; - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } - if (state->interface == PHY_INTERFACE_MODE_2500BASEX || - state->interface == PHY_INTERFACE_MODE_NA) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } - break; - default: - linkmode_zero(supported); - return; - } - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -} - static void sparx5_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) @@ -202,7 +129,7 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = { }; const struct phylink_mac_ops sparx5_phylink_mac_ops = { - .validate = sparx5_phylink_validate, + .validate = phylink_generic_validate, .mac_config = sparx5_phylink_mac_config, .mac_link_down = sparx5_phylink_mac_link_down, .mac_link_up = sparx5_phylink_mac_link_up, diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile index 0edd5bb685f3..e16a4221f571 100644 --- a/drivers/net/ethernet/microsoft/mana/Makefile +++ b/drivers/net/ethernet/microsoft/mana/Makefile @@ -3,4 +3,4 @@ # Makefile for the Microsoft Azure Network Adapter driver obj-$(CONFIG_MICROSOFT_MANA) += mana.o -mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o +mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o mana_bpf.o diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 34b971ff8ef8..078d6a5a0768 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc, if (err) goto out; - err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, - &hwc_wq->msg_buf); - if (err) - goto out; - hwc_wq->hwc = hwc; hwc_wq->gdma_wq = queue; hwc_wq->queue_depth = q_depth; hwc_wq->hwc_cq = hwc_cq; + err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size, + &hwc_wq->msg_buf); + if (err) + goto out; + *hwc_wq_ptr = hwc_wq; return 0; out: diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h index d047ee876f12..9a12607fb511 100644 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -289,6 +289,8 @@ struct mana_rxq { struct mana_cq rx_cq; + struct completion fence_event; + struct net_device *ndev; /* Total number of receive buffers to be allocated */ @@ -298,6 +300,9 @@ struct mana_rxq { struct mana_stats stats; + struct bpf_prog __rcu *bpf_prog; + struct xdp_rxq_info xdp_rxq; + /* MUST BE THE LAST MEMBER: * Each receive buffer has an associated mana_recv_buf_oob. */ @@ -353,6 +358,8 @@ struct mana_port_context { /* This points to an array of num_queues of RQ pointers. */ struct mana_rxq **rxqs; + struct bpf_prog *bpf_prog; + /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. 
*/ unsigned int max_queues; unsigned int num_queues; @@ -367,6 +374,7 @@ struct mana_port_context { struct mana_ethtool_stats eth_stats; }; +int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, bool update_hash, bool update_tab); @@ -377,6 +385,13 @@ int mana_detach(struct net_device *ndev, bool from_close); int mana_probe(struct gdma_dev *gd, bool resuming); void mana_remove(struct gdma_dev *gd, bool suspending); +void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev); +u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, + struct xdp_buff *xdp, void *buf_va, uint pkt_len); +struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); +void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); +int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); + extern const struct ethtool_ops mana_ethtool_ops; struct mana_obj_spec { diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c new file mode 100644 index 000000000000..1d2f948b5c00 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2021, Microsoft Corporation. */ + +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/mm.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#include <net/xdp.h> + +#include "mana.h" + +void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev) +{ + u16 txq_idx = skb_get_queue_mapping(skb); + struct netdev_queue *ndevtxq; + int rc; + + __skb_push(skb, ETH_HLEN); + + ndevtxq = netdev_get_tx_queue(ndev, txq_idx); + __netif_tx_lock(ndevtxq, smp_processor_id()); + + rc = mana_start_xmit(skb, ndev); + + __netif_tx_unlock(ndevtxq); + + if (dev_xmit_complete(rc)) + return; + + dev_kfree_skb_any(skb); + ndev->stats.tx_dropped++; +} + +u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, + struct xdp_buff *xdp, void *buf_va, uint pkt_len) +{ + struct bpf_prog *prog; + u32 act = XDP_PASS; + + rcu_read_lock(); + prog = rcu_dereference(rxq->bpf_prog); + + if (!prog) + goto out; + + xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq); + xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false); + + act = bpf_prog_run_xdp(prog, xdp); + + switch (act) { + case XDP_PASS: + case XDP_TX: + case XDP_DROP: + break; + + case XDP_ABORTED: + trace_xdp_exception(ndev, prog, act); + break; + + default: + bpf_warn_invalid_xdp_action(ndev, prog, act); + } + +out: + rcu_read_unlock(); + + return act; +} + +static unsigned int mana_xdp_fraglen(unsigned int len) +{ + return SKB_DATA_ALIGN(len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +struct bpf_prog *mana_xdp_get(struct mana_port_context *apc) +{ + ASSERT_RTNL(); + + return apc->bpf_prog; +} + +static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc) +{ + return rtnl_dereference(apc->rxqs[0]->bpf_prog); +} + +/* Set xdp program on channels */ +void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog = mana_chn_xdp_get(apc); + unsigned int num_queues = apc->num_queues; + int i; + + ASSERT_RTNL(); + + if (old_prog == prog) + return; + + if (prog) + bpf_prog_add(prog, num_queues); + + for (i = 0; i < num_queues; i++) + rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog); + + if (old_prog) + for (i = 0; i < num_queues; i++) + bpf_prog_put(old_prog); +} + +static int mana_xdp_set(struct 
net_device *ndev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct mana_port_context *apc = netdev_priv(ndev); + struct bpf_prog *old_prog; + int buf_max; + + old_prog = mana_xdp_get(apc); + + if (!old_prog && !prog) + return 0; + + buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN); + if (prog && buf_max > PAGE_SIZE) { + netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n", + ndev->mtu, buf_max); + NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large"); + + return -EOPNOTSUPP; + } + + /* One refcnt of the prog is held by the caller already, so + * don't increase refcnt for this one. + */ + apc->bpf_prog = prog; + + if (old_prog) + bpf_prog_put(old_prog); + + if (apc->port_is_up) + mana_chn_setxdp(apc, prog); + + return 0; +} + +int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + struct netlink_ext_ack *extack = bpf->extack; + int ret; + + switch (bpf->command) { + case XDP_SETUP_PROG: + return mana_xdp_set(ndev, bpf->prog, extack); + + default: + return -EOPNOTSUPP; + } + + return ret; +} diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 72cbf45c42d8..498d0f999275 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright (c) 2021, Microsoft Corporation. */ +#include <uapi/linux/bpf.h> + #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -125,7 +127,7 @@ frag_err: return -ENOMEM; } -static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) +int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) { enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT; struct mana_port_context *apc = netdev_priv(ndev); @@ -378,6 +380,7 @@ static const struct net_device_ops mana_devops = { .ndo_start_xmit = mana_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats64 = mana_get_stats64, + .ndo_bpf = mana_bpf, }; static void mana_cleanup_port_context(struct mana_port_context *apc) @@ -749,6 +752,61 @@ out: return err; } +static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq) +{ + struct mana_fence_rq_resp resp = {}; + struct mana_fence_rq_req req = {}; + int err; + + init_completion(&rxq->fence_event); + + mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ, + sizeof(req), sizeof(resp)); + req.wq_obj_handle = rxq->rxobj; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + if (err) { + netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n", + rxq->rxq_idx, err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp)); + if (err || resp.hdr.status) { + netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", + rxq->rxq_idx, err, resp.hdr.status); + if (!err) + err = -EPROTO; + + return err; + } + + if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) { + netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n", + rxq->rxq_idx); + return -ETIMEDOUT; + } + + return 0; +} + +static void mana_fence_rqs(struct mana_port_context *apc) +{ + unsigned int rxq_idx; + struct mana_rxq *rxq; + int err; + + for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { + rxq = apc->rxqs[rxq_idx]; + err = mana_fence_rq(apc, rxq); + + /* In case of any error, use sleep instead
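+ * of waiting for the RQ fence completion, giving packets
+ * still in flight on this RQ a chance to drain.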
*/ + if (err) + msleep(100); + } +} + static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units) { u32 used_space_old; @@ -906,6 +964,25 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq) WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); } +static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len, + struct xdp_buff *xdp) +{ + struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE); + + if (!skb) + return NULL; + + if (xdp->data_hard_start) { + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + } else { + skb_reserve(skb, XDP_PACKET_HEADROOM); + skb_put(skb, pkt_len); + } + + return skb; +} + static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq) { @@ -914,8 +991,10 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, uint pkt_len = cqe->ppi[0].pkt_len; u16 rxq_idx = rxq->rxq_idx; struct napi_struct *napi; + struct xdp_buff xdp = {}; struct sk_buff *skb; u32 hash_value; + u32 act; rxq->rx_cq.work_done++; napi = &rxq->rx_cq.napi; @@ -925,15 +1004,16 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, return; } - skb = build_skb(buf_va, PAGE_SIZE); + act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len); - if (!skb) { - free_page((unsigned long)buf_va); - ++ndev->stats.rx_dropped; - return; - } + if (act != XDP_PASS && act != XDP_TX) + goto drop; + + skb = mana_build_skb(buf_va, pkt_len, &xdp); + + if (!skb) + goto drop; - skb_put(skb, pkt_len); skb->dev = napi->dev; skb->protocol = eth_type_trans(skb, ndev); @@ -954,12 +1034,24 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3); } + if (act == XDP_TX) { + skb_set_queue_mapping(skb, rxq_idx); + mana_xdp_tx(skb, ndev); + return; + } + napi_gro_receive(napi, skb); u64_stats_update_begin(&rx_stats->syncp); rx_stats->packets++; rx_stats->bytes += pkt_len; u64_stats_update_end(&rx_stats->syncp); + return; + +drop: + free_page((unsigned long)buf_va); + ++ndev->stats.rx_dropped; + return; } static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, @@ -988,7 +1080,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, return; case CQE_RX_OBJECT_FENCE: - netdev_err(ndev, "RX Fencing is unsupported\n"); + complete(&rxq->fence_event); return; default: @@ -1016,7 +1108,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, new_page = alloc_page(GFP_ATOMIC); if (new_page) { - da = dma_map_page(dev, new_page, 0, rxq->datasize, + da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize, DMA_FROM_DEVICE); if (dma_mapping_error(dev, da)) { @@ -1291,6 +1383,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc, napi_synchronize(napi); napi_disable(napi); + + xdp_rxq_info_unreg(&rxq->xdp_rxq); + netif_napi_del(napi); mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); @@ -1342,7 +1437,8 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc, if (!page) return -ENOMEM; - da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE); + da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize, + DMA_FROM_DEVICE); if (dma_mapping_error(dev, da)) { __free_page(page); @@ -1485,6 +1581,12 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; netif_napi_add(ndev, &cq->napi, mana_poll, 1); + + WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, + cq->napi.napi_id)); + 
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + napi_enable(&cq->napi); mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); @@ -1572,6 +1674,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, bool update_hash, bool update_tab) { u32 queue_idx; + int err; int i; if (update_tab) { @@ -1581,7 +1684,13 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, } } - return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); + err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); + if (err) + return err; + + mana_fence_rqs(apc); + + return 0; } static int mana_init_port(struct net_device *ndev) @@ -1650,6 +1759,8 @@ int mana_alloc_queues(struct net_device *ndev) if (err) goto destroy_vport; + mana_chn_setxdp(apc, mana_xdp_get(apc)); + return 0; destroy_vport: @@ -1698,6 +1809,8 @@ static int mana_dealloc_queues(struct net_device *ndev) if (apc->port_is_up) return -EINVAL; + mana_chn_setxdp(apc, NULL); + /* No packet can be transmitted now since apc->port_is_up is false. * There is still a tiny chance that mana_poll_tx_cq() can re-enable * a txq because it may not timely see apc->port_is_up being cleared @@ -1724,9 +1837,6 @@ static int mana_dealloc_queues(struct net_device *ndev) return err; } - /* TODO: Implement RX fencing */ - ssleep(1); - mana_destroy_vport(apc); return 0; diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 722c27694b21..41b34a509308 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -7,9 +7,11 @@ mscc_ocelot_switch_lib-y := \ ocelot_vcap.o \ ocelot_flower.o \ ocelot_ptp.o \ - ocelot_devlink.o + ocelot_devlink.o \ + vsc7514_regs.o mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o mscc_ocelot-y := \ + ocelot_fdma.o \ ocelot_vsc7514.o \ ocelot_net.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e6c18b598d5c..b1311b656e17 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -61,9 +61,9 @@ static void ocelot_mact_select(struct ocelot *ocelot, } -int ocelot_mact_learn(struct ocelot *ocelot, int port, - const unsigned char mac[ETH_ALEN], - unsigned int vid, enum macaccess_entry_type type) +static int __ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) { u32 cmd = ANA_TABLES_MACACCESS_VALID | ANA_TABLES_MACACCESS_DEST_IDX(port) | @@ -83,8 +83,6 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, if (mc_ports & BIT(ocelot->num_phys_ports)) cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY; - mutex_lock(&ocelot->mact_lock); - ocelot_mact_select(ocelot, mac, vid); /* Issue a write command */ @@ -92,9 +90,20 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, err = ocelot_mact_wait_for_completion(ocelot); + return err; +} + +int ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) +{ + int ret; + + mutex_lock(&ocelot->mact_lock); + ret = __ocelot_mact_learn(ocelot, port, mac, vid, type); mutex_unlock(&ocelot->mact_lock); - return err; + return ret; } EXPORT_SYMBOL(ocelot_mact_learn); @@ -120,6 +129,66 @@ int ocelot_mact_forget(struct ocelot *ocelot, } EXPORT_SYMBOL(ocelot_mact_forget); +int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx, + const unsigned char mac[ETH_ALEN], + unsigned 
int vid, enum macaccess_entry_type *type) +{ + int val; + + mutex_lock(&ocelot->mact_lock); + + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a read command with MACACCESS_VALID=1. */ + ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID | + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ), + ANA_TABLES_MACACCESS); + + if (ocelot_mact_wait_for_completion(ocelot)) { + mutex_unlock(&ocelot->mact_lock); + return -ETIMEDOUT; + } + + /* Read back the entry flags */ + val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); + + mutex_unlock(&ocelot->mact_lock); + + if (!(val & ANA_TABLES_MACACCESS_VALID)) + return -ENOENT; + + *dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val); + *type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val); + + return 0; +} +EXPORT_SYMBOL(ocelot_mact_lookup); + +int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type, + int sfid, int ssid) +{ + int ret; + + mutex_lock(&ocelot->mact_lock); + + ocelot_write(ocelot, + (sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) | + ANA_TABLES_STREAMDATA_SFID(sfid) | + (ssid < 0 ? 0 : ANA_TABLES_STREAMDATA_SSID_VALID) | + ANA_TABLES_STREAMDATA_SSID(ssid), + ANA_TABLES_STREAMDATA); + + ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type); + + mutex_unlock(&ocelot->mact_lock); + + return ret; +} +EXPORT_SYMBOL(ocelot_mact_learn_streamdata); + static void ocelot_mact_init(struct ocelot *ocelot) { /* Configure the learning mode entries attributes: @@ -594,9 +663,17 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port, struct ocelot_port *ocelot_port = ocelot->ports[port]; int err; + ocelot_port->speed = SPEED_UNKNOWN; + ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, DEV_MAC_ENA_CFG); + if (ocelot->ops->cut_through_fwd) { + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ops->cut_through_fwd(ocelot); + mutex_unlock(&ocelot->fwd_domain_lock); + } + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); err = ocelot_port_flush(ocelot, port); @@ -628,6 +705,8 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, int mac_speed, mode = 0; u32 mac_fc_cfg; + ocelot_port->speed = speed; + /* The MAC might be integrated in systems where the MAC speed is fixed * and it's the PCS who is performing the rate adaptation, so we have * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG @@ -700,6 +779,15 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); + /* If the port supports cut-through forwarding, update the masks before + * enabling forwarding on the port. 
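+ * Otherwise, if this port brings the highest link speed into the
+ * domain, cut-through traffic forwarded towards it could underrun
+ * until the masks are updated.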
+ */ + if (ocelot->ops->cut_through_fwd) { + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ops->cut_through_fwd(ocelot); + mutex_unlock(&ocelot->fwd_domain_lock); + } + /* Core: Enable port for frame transfer */ ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); @@ -966,14 +1054,34 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh) return 0; } -int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) +void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb, + u64 timestamp) { struct skb_shared_hwtstamps *shhwtstamps; u64 tod_in_ns, full_ts_in_ns; + struct timespec64 ts; + + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + + tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); + if ((tod_in_ns & 0xffffffff) < timestamp) + full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | + timestamp; + else + full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | + timestamp; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = full_ts_in_ns; +} +EXPORT_SYMBOL(ocelot_ptp_rx_timestamp); + +int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) +{ u64 timestamp, src_port, len; u32 xfh[OCELOT_TAG_LEN / 4]; struct net_device *dev; - struct timespec64 ts; struct sk_buff *skb; int sz, buf_len; u32 val, *buf; @@ -1029,21 +1137,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb) *buf = val; } - if (ocelot->ptp) { - ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); - - tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec); - if ((tod_in_ns & 0xffffffff) < timestamp) - full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) | - timestamp; - else - full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) | - timestamp; - - shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamps->hwtstamp = full_ts_in_ns; - } + if (ocelot->ptp) + ocelot_ptp_rx_timestamp(ocelot, skb, timestamp); /* Everything we see on an interface that is in the HW bridge * has already been forwarded. 
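The ocelot_ptp_rx_timestamp() helper factored out above rebuilds a full 64-bit nanosecond RX timestamp from the (effectively 32-bit) value carried in the extraction header: it reads the current PTP time and splices the two together, borrowing one from the upper word when the current low word has already wrapped past the captured value. A minimal standalone sketch of that wraparound handling (plain C; the names splice_timestamp, now_ns and ts_lo are illustrative, not driver API):

#include <stdint.h>

/* Rebuild a 64-bit timestamp from its low 32 bits and the current
 * time. If the current low word is smaller than the captured one,
 * the counter wrapped after the capture, so borrow 1 from the
 * upper word.
 */
static uint64_t splice_timestamp(uint64_t now_ns, uint32_t ts_lo)
{
	uint64_t hi = now_ns >> 32;

	if ((uint32_t)now_ns < ts_lo)
		hi--;

	return (hi << 32) | ts_lo;
}

For example, with now_ns = 0x100000000 and ts_lo = 0xffffffff the helper returns 0xffffffff, since the capture predates the wrap.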
@@ -1076,6 +1171,18 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp) } EXPORT_SYMBOL(ocelot_can_inject); +void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag) +{ + ocelot_ifh_set_bypass(ifh, 1); + ocelot_ifh_set_dest(ifh, BIT_ULL(port)); + ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); + if (vlan_tag) + ocelot_ifh_set_vlan_tci(ifh, vlan_tag); + if (rew_op) + ocelot_ifh_set_rew_op(ifh, rew_op); +} +EXPORT_SYMBOL(ocelot_ifh_port_set); + void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, u32 rew_op, struct sk_buff *skb) { @@ -1085,11 +1192,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); - ocelot_ifh_set_bypass(ifh, 1); - ocelot_ifh_set_dest(ifh, BIT_ULL(port)); - ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C); - ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb)); - ocelot_ifh_set_rew_op(ifh, rew_op); + ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); for (i = 0; i < OCELOT_TAG_LEN / 4; i++) ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); @@ -1238,6 +1341,43 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col, return 0; } +int ocelot_mact_flush(struct ocelot *ocelot, int port) +{ + int err; + + mutex_lock(&ocelot->mact_lock); + + /* Program ageing filter for a single port */ + ocelot_write(ocelot, ANA_ANAGEFIL_PID_EN | ANA_ANAGEFIL_PID_VAL(port), + ANA_ANAGEFIL); + + /* Flushing dynamic FDB entries requires two successive age scans */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE), + ANA_TABLES_MACACCESS); + + err = ocelot_mact_wait_for_completion(ocelot); + if (err) { + mutex_unlock(&ocelot->mact_lock); + return err; + } + + /* And second... 
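+ * age scan: the first pass only marked the entries as aged,
+ * this second pass actually removes them.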
*/ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE), + ANA_TABLES_MACACCESS); + + err = ocelot_mact_wait_for_completion(ocelot); + + /* Restore ageing filter */ + ocelot_write(ocelot, 0, ANA_ANAGEFIL); + + mutex_unlock(&ocelot->mact_lock); + + return err; +} +EXPORT_SYMBOL_GPL(ocelot_mact_flush); + int ocelot_fdb_dump(struct ocelot *ocelot, int port, dsa_fdb_dump_cb_t *cb, void *data) { @@ -1278,6 +1418,225 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_fdb_dump); +static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588); + *(__be16 *)trap->key.etype.etype.mask = htons(0xffff); +} + +static void +ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.dport.value = PTP_EV_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.dport.value = PTP_EV_PORT; + trap->key.ipv6.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV4; + trap->key.ipv4.dport.value = PTP_GEN_PORT; + trap->key.ipv4.dport.mask = 0xffff; +} + +static void +ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap) +{ + trap->key_type = OCELOT_VCAP_KEY_IPV6; + trap->key.ipv6.dport.value = PTP_GEN_PORT; + trap->key.ipv6.dport.mask = 0xffff; +} + +static int ocelot_trap_add(struct ocelot *ocelot, int port, + unsigned long cookie, + void (*populate)(struct ocelot_vcap_filter *f)) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + bool new = false; + int err; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) { + trap = kzalloc(sizeof(*trap), GFP_KERNEL); + if (!trap) + return -ENOMEM; + + populate(trap); + trap->prio = 1; + trap->id.cookie = cookie; + trap->id.tc_offload = false; + trap->block_id = VCAP_IS2; + trap->type = OCELOT_VCAP_FILTER_OFFLOAD; + trap->lookup = 0; + trap->action.cpu_copy_ena = true; + trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + trap->action.port_mask = 0; + new = true; + } + + trap->ingress_port_mask |= BIT(port); + + if (new) + err = ocelot_vcap_filter_add(ocelot, trap, NULL); + else + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) { + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + kfree(trap); + return err; + } + + return 0; +} + +static int ocelot_trap_del(struct ocelot *ocelot, int port, + unsigned long cookie) +{ + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, + false); + if (!trap) + return 0; + + trap->ingress_port_mask &= ~BIT(port); + if (!trap->ingress_port_mask) + return ocelot_vcap_filter_del(ocelot, trap); + + return ocelot_vcap_filter_replace(ocelot, trap); +} + +static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long l2_cookie = ocelot->num_phys_ports + 1; + + return ocelot_trap_add(ocelot, port, l2_cookie, + ocelot_populate_l2_ptp_trap_key); +} + +static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port) +{ + 
unsigned long l2_cookie = ocelot->num_phys_ports + 1; + + return ocelot_trap_del(ocelot, port, l2_cookie); +} + +static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; + unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + int err; + + err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, + ocelot_populate_ipv4_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, + ocelot_populate_ipv4_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + + return err; +} + +static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2; + unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3; + int err; + + err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie); + return err; +} + +static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; + unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + int err; + + err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, + ocelot_populate_ipv6_ptp_event_trap_key); + if (err) + return err; + + err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, + ocelot_populate_ipv6_ptp_general_trap_key); + if (err) + ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + + return err; +} + +static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port) +{ + unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4; + unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5; + int err; + + err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie); + err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie); + return err; +} + +static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port, + bool l2, bool l4) +{ + int err; + + if (l2) + err = ocelot_l2_ptp_trap_add(ocelot, port); + else + err = ocelot_l2_ptp_trap_del(ocelot, port); + if (err) + return err; + + if (l4) { + err = ocelot_ipv4_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv4; + + err = ocelot_ipv6_ptp_trap_add(ocelot, port); + if (err) + goto err_ipv6; + } else { + err = ocelot_ipv4_ptp_trap_del(ocelot, port); + + err |= ocelot_ipv6_ptp_trap_del(ocelot, port); + } + if (err) + return err; + + return 0; + +err_ipv6: + ocelot_ipv4_ptp_trap_del(ocelot, port); +err_ipv4: + if (l2) + ocelot_l2_ptp_trap_del(ocelot, port); + return err; +} + int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) { return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, @@ -1288,15 +1647,13 @@ EXPORT_SYMBOL(ocelot_hwstamp_get); int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + bool l2 = false, l4 = false; struct hwtstamp_config cfg; + int err; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - /* Tx type sanity check */ switch (cfg.tx_type) { case HWTSTAMP_TX_ON: @@ -1320,28 +1677,42 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case 
HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + l4 = true; + break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + l2 = true; + break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + l2 = true; + l4 = true; break; default: mutex_unlock(&ocelot->ptp_lock); return -ERANGE; } + err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); + if (err) { + mutex_unlock(&ocelot->ptp_lock); + return err; + } + + if (l2 && l4) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + else if (l2) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + else if (l4) + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + else + cfg.rx_filter = HWTSTAMP_FILTER_NONE; + /* Commit back the result & save it */ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg)); mutex_unlock(&ocelot->ptp_lock); @@ -1444,14 +1815,16 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) | BIT(HWTSTAMP_TX_ONESTEP_SYNC); - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT); return 0; } EXPORT_SYMBOL(ocelot_get_ts_info); -static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond, - bool only_active_ports) +static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond) { u32 mask = 0; int port; @@ -1462,26 +1835,25 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond, if (!ocelot_port) continue; - if (ocelot_port->bond == bond) { - if (only_active_ports && !ocelot_port->lag_tx_active) - continue; - + if (ocelot_port->bond == bond) mask |= BIT(port); - } } return mask; } -static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port, - struct net_device *bridge) +u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port) { struct ocelot_port *ocelot_port = ocelot->ports[src_port]; + const struct net_device *bridge; u32 mask = 0; int port; - if (!ocelot_port || ocelot_port->bridge != bridge || - ocelot_port->stp_state != BR_STATE_FORWARDING) + if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING) + return 0; + + bridge = ocelot_port->bridge; + if (!bridge) return 0; for (port = 0; port < ocelot->num_phys_ports; port++) { @@ -1497,8 +1869,9 @@ static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port, return mask; } +EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask); -static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) +u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) { u32 mask = 0; int port; @@ -1515,12 +1888,22 @@ static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) return mask; } +EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask); -void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) +void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) { unsigned long cpu_fwd_mask; int port; + lockdep_assert_held(&ocelot->fwd_domain_lock); + + /* If cut-through forwarding is supported, update the masks before a + * port joins the forwarding domain, to avoid potential underruns if it + * has the highest speed from the new domain. 
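+ * For example, when a 2500 Mbps port joins a bridge of 1000 Mbps ports, + * a frame cut-through-forwarded from a 1000 Mbps ingress towards the + * faster egress would be drained quicker than it arrives; taking those + * paths out of cut-through mode first avoids the underrun.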
+ */ + if (joining && ocelot->ops->cut_through_fwd) + ocelot->ops->cut_through_fwd(ocelot); + /* If a DSA tag_8021q CPU exists, it needs to be included in the * regular forwarding path of the front ports regardless of whether * those are bridged or standalone. @@ -1548,16 +1931,13 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) mask = GENMASK(ocelot->num_phys_ports - 1, 0); mask &= ~cpu_fwd_mask; } else if (ocelot_port->bridge) { - struct net_device *bridge = ocelot_port->bridge; struct net_device *bond = ocelot_port->bond; - mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge); + mask = ocelot_get_bridge_fwd_mask(ocelot, port); mask |= cpu_fwd_mask; mask &= ~BIT(port); - if (bond) { - mask &= ~ocelot_get_bond_mask(ocelot, bond, - false); - } + if (bond) + mask &= ~ocelot_get_bond_mask(ocelot, bond); } else { /* Standalone ports forward only to DSA tag_8021q CPU * ports (if those exist), or to the hardware CPU port @@ -1568,6 +1948,16 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot) ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port); } + + /* If cut-through forwarding is supported and a port is leaving, there + * is a chance that cut-through was disabled on the other ports due to + * the port which is leaving (it has a higher link speed). We need to + * update the cut-through masks of the remaining ports no earlier than + * after the port has left, to prevent underruns from happening between + * the cut-through update and the forwarding domain update. + */ + if (!joining && ocelot->ops->cut_through_fwd) + ocelot->ops->cut_through_fwd(ocelot); } EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); @@ -1576,6 +1966,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) struct ocelot_port *ocelot_port = ocelot->ports[port]; u32 learn_ena = 0; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot_port->stp_state = state; if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) && @@ -1585,7 +1977,9 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA, ANA_PORT_PORT_CFG, port); - ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_bridge_stp_state_set); @@ -1815,9 +2209,13 @@ void ocelot_port_bridge_join(struct ocelot *ocelot, int port, { struct ocelot_port *ocelot_port = ocelot->ports[port]; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot_port->bridge = bridge; - ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_port_bridge_join); @@ -1826,11 +2224,15 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, { struct ocelot_port *ocelot_port = ocelot->ports[port]; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot_port->bridge = NULL; ocelot_port_set_pvid(ocelot, port, NULL); ocelot_port_manage_port_tag(ocelot, port); - ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, false); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_port_bridge_leave); @@ -1874,13 +2276,17 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) if (!bond || (visited & BIT(lag))) continue; - bond_mask = ocelot_get_bond_mask(ocelot, bond, true); + bond_mask = ocelot_get_bond_mask(ocelot, bond); for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { + struct ocelot_port *ocelot_port = 
ocelot->ports[port]; + // Destination mask ocelot_write_rix(ocelot, bond_mask, ANA_PGID_PGID, port); - aggr_idx[num_active_ports++] = port; + + if (ocelot_port->lag_tx_active) + aggr_idx[num_active_ports++] = port; } for_each_aggr_pgid(ocelot, i) { @@ -1929,8 +2335,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) bond = ocelot_port->bond; if (bond) { - int lag = __ffs(ocelot_get_bond_mask(ocelot, bond, - false)); + int lag = __ffs(ocelot_get_bond_mask(ocelot, bond)); ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_PORTID_VAL(lag), @@ -1952,12 +2357,16 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port, if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) return -EOPNOTSUPP; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ports[port]->bond = bond; ocelot_setup_logical_port_ids(ocelot); - ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, true); ocelot_set_aggr_pgids(ocelot); + mutex_unlock(&ocelot->fwd_domain_lock); + return 0; } EXPORT_SYMBOL(ocelot_port_lag_join); @@ -1965,11 +2374,15 @@ EXPORT_SYMBOL(ocelot_port_lag_join); void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond) { + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ports[port]->bond = NULL; ocelot_setup_logical_port_ids(ocelot); - ocelot_apply_bridge_fwd_mask(ocelot); + ocelot_apply_bridge_fwd_mask(ocelot, false); ocelot_set_aggr_pgids(ocelot); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL(ocelot_port_lag_leave); @@ -2260,6 +2673,7 @@ int ocelot_init(struct ocelot *ocelot) mutex_init(&ocelot->stats_lock); mutex_init(&ocelot->ptp_lock); mutex_init(&ocelot->mact_lock); + mutex_init(&ocelot->fwd_domain_lock); spin_lock_init(&ocelot->ptp_clock_lock); spin_lock_init(&ocelot->ts_id_lock); snprintf(queue_name, sizeof(queue_name), "%s-stats", @@ -2283,6 +2697,9 @@ int ocelot_init(struct ocelot *ocelot) ocelot_vcap_init(ocelot); ocelot_cpu_port_init(ocelot); + if (ocelot->ops->psfp_init) + ocelot->ops->psfp_init(ocelot); + for (port = 0; port < ocelot->num_phys_ports; port++) { /* Clear all counters (5 groups) */ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) | diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index e43da09b8f91..bf4eff6d7086 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -32,6 +32,8 @@ #define OCELOT_PTP_QUEUE_SZ 128 +#define OCELOT_JUMBO_MTU 9000 + struct ocelot_port_tc { bool block_shared; unsigned long offload_cnt; @@ -55,19 +57,6 @@ struct ocelot_dump_ctx { int idx; }; -/* MAC table entry types. - * ENTRYTYPE_NORMAL is subject to aging. - * ENTRYTYPE_LOCKED is not subject to aging. - * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. - * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. - */ -enum macaccess_entry_type { - ENTRYTYPE_NORMAL = 0, - ENTRYTYPE_LOCKED, - ENTRYTYPE_MACv4, - ENTRYTYPE_MACv6, -}; - /* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports * possibilities of egress port masks for L2 multicast traffic. * For a switch with 9 user ports, there are 512 possible port masks, but the diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c new file mode 100644 index 000000000000..dffa597bffe6 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_fdma.c @@ -0,0 +1,894 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi SoCs FDMA driver + * + * Copyright (c) 2021 Microchip + * + * Page recycling code is mostly taken from gianfar driver. 
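+ * + * The recycling scheme splits each page in two: an RX buffer owns half a + * page (OCELOT_FDMA_RX_SIZE is PAGE_SIZE / 2), and once one half has been + * handed to the network stack the buffer flips to the other half for the + * next DCB. The page itself is reused as long as its refcount shows that + * the stack has already dropped its reference by the time the ring wraps + * around.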
+ */ + +#include <linux/align.h> +#include <linux/bitops.h> +#include <linux/dmapool.h> +#include <linux/dsa/ocelot.h> +#include <linux/netdevice.h> +#include <linux/of_platform.h> +#include <linux/skbuff.h> + +#include "ocelot_fdma.h" +#include "ocelot_qs.h" + +DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled); + +static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data) +{ + regmap_write(ocelot->targets[FDMA], reg, data); +} + +static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg) +{ + u32 retval; + + regmap_read(ocelot->targets[FDMA], reg, &retval); + + return retval; +} + +static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx) +{ + return base + idx * sizeof(struct ocelot_fdma_dcb); +} + +static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma) +{ + return (dma - base) / sizeof(struct ocelot_fdma_dcb); +} + +static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz) +{ + return unlikely(idx == ring_sz - 1) ? 0 : idx + 1; +} + +static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz) +{ + return unlikely(idx == 0) ? ring_sz - 1 : idx - 1; +} + +static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring; + + if (rx_ring->next_to_use >= rx_ring->next_to_clean) + return OCELOT_FDMA_RX_RING_SIZE - + (rx_ring->next_to_use - rx_ring->next_to_clean) - 1; + else + return rx_ring->next_to_clean - rx_ring->next_to_use - 1; +} + +static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + + if (tx_ring->next_to_use >= tx_ring->next_to_clean) + return OCELOT_FDMA_TX_RING_SIZE - + (tx_ring->next_to_use - tx_ring->next_to_clean) - 1; + else + return tx_ring->next_to_clean - tx_ring->next_to_use - 1; +} + +static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + + return tx_ring->next_to_clean == tx_ring->next_to_use; +} + +static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma, + int chan) +{ + ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma); + /* Barrier to force memory writes to DCB to be completed before starting + * the channel. 
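+ * This orders the CPU stores that filled the DCB against the MMIO write + * that kicks the channel; the DCBs live in coherent DMA memory, so no + * cache maintenance is involved.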
+ */ + wmb(); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan)); +} + +static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan) +{ + unsigned long timeout; + u32 safe; + + timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US); + do { + safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE); + if (safe & BIT(chan)) + return 0; + } while (time_before(jiffies, timeout)); + + return -ETIMEDOUT; +} + +static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb, + dma_addr_t dma_addr, + size_t size) +{ + u32 offset = dma_addr & 0x3; + + dcb->llp = 0; + dcb->datap = ALIGN_DOWN(dma_addr, 4); + dcb->datal = ALIGN_DOWN(size, 4); + dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset); +} + +static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot, + struct ocelot_fdma_rx_buf *rxb) +{ + dma_addr_t mapping; + struct page *page; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ocelot->dev, mapping))) { + __free_page(page); + return false; + } + + rxb->page = page; + rxb->page_offset = 0; + rxb->dma_addr = mapping; + + return true; +} + +static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + struct ocelot_fdma_dcb *dcb; + dma_addr_t dma_addr; + int ret = 0; + u16 idx; + + rx_ring = &fdma->rx_ring; + idx = rx_ring->next_to_use; + + while (alloc_cnt--) { + rxb = &rx_ring->bufs[idx]; + /* Try to reuse the existing page before allocating a new one */ + if (unlikely(!rxb->page)) { + if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) { + dev_err_ratelimited(ocelot->dev, + "Failed to allocate RX buffer\n"); + ret = -ENOMEM; + break; + } + } + + dcb = &rx_ring->dcbs[idx]; + dma_addr = rxb->dma_addr + rxb->page_offset; + ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE); + + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + /* Chain the DCB to the next one */ + dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx); + } + + rx_ring->next_to_use = idx; + rx_ring->next_to_alloc = idx; + + return ret; +} + +static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot, + struct ocelot_fdma_tx_buf *tx_buf, + struct ocelot_fdma_dcb *dcb, + struct sk_buff *skb) +{ + dma_addr_t mapping; + + mapping = dma_map_single(ocelot->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(ocelot->dev, mapping))) + return false; + + dma_unmap_addr_set(tx_buf, dma_addr, mapping); + + ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE); + tx_buf->skb = skb; + dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len); + dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF; + + return true; +} + +static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot) +{ + u32 llp; + + /* Check if the FDMA has hit the DCB with LLP == NULL */ + llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN)); + if (unlikely(llp)) + return false; + + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE, + BIT(MSCC_FDMA_XTR_CHAN)); + + return true; +} + +static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring) +{ + struct ocelot_fdma_dcb *dcb; + unsigned int idx; + + idx = ocelot_fdma_idx_prev(rx_ring->next_to_use, + OCELOT_FDMA_RX_RING_SIZE); + dcb = &rx_ring->dcbs[idx]; + dcb->llp = 0; +} + +static void ocelot_fdma_rx_restart(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring 
*rx_ring; + const u8 chan = MSCC_FDMA_XTR_CHAN; + dma_addr_t new_llp, dma_base; + unsigned int idx; + u32 llp_prev; + int ret; + + rx_ring = &fdma->rx_ring; + ret = ocelot_fdma_wait_chan_safe(ocelot, chan); + if (ret) { + dev_err_ratelimited(ocelot->dev, + "Unable to stop RX channel\n"); + return; + } + + ocelot_fdma_rx_set_llp(rx_ring); + + /* FDMA stopped on the last DCB that contained a NULL LLP, since + * we processed some DCBs in RX, there is free space, and we must set + * DCB_LLP to point to the next DCB + */ + llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan)); + dma_base = rx_ring->dcbs_dma; + + /* Get the next DMA addr located after LLP == NULL DCB */ + idx = ocelot_fdma_dma_idx(dma_base, llp_prev); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + new_llp = ocelot_fdma_idx_dma(dma_base, idx); + + /* Finally reactivate the channel */ + ocelot_fdma_activate_chan(ocelot, new_llp, chan); +} + +static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat, + struct sk_buff *skb, bool first) +{ + int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat); + struct page *page = rxb->page; + + if (likely(first)) { + skb_put(skb, size); + } else { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset, size, OCELOT_FDMA_RX_SIZE); + } + + /* Try to reuse page */ + if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page))) + return false; + + /* Change offset to the other half */ + rxb->page_offset ^= OCELOT_FDMA_RX_SIZE; + + page_ref_inc(page); + + return true; +} + +static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot, + struct ocelot_fdma_rx_buf *old_rxb) +{ + struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring; + struct ocelot_fdma_rx_buf *new_rxb; + + new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc]; + rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc, + OCELOT_FDMA_RX_RING_SIZE); + + /* Copy page reference */ + *new_rxb = *old_rxb; + + /* Sync for use by the device */ + dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr, + old_rxb->page_offset, + OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat, + struct ocelot_fdma_rx_buf *rxb, + struct sk_buff *skb) +{ + bool first = false; + + /* Allocate skb head and data */ + if (likely(!skb)) { + void *buff_addr = page_address(rxb->page) + + rxb->page_offset; + + skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE); + if (unlikely(!skb)) { + dev_err_ratelimited(ocelot->dev, + "build_skb failed !\n"); + return NULL; + } + first = true; + } + + dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr, + rxb->page_offset, OCELOT_FDMA_RX_SIZE, + DMA_FROM_DEVICE); + + if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) { + /* Reuse the free half of the page for the next_to_alloc DCB*/ + ocelot_fdma_reuse_rx_page(ocelot, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear rx buff content */ + rxb->page = NULL; + + return skb; +} + +static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb) +{ + struct net_device *ndev; + void *xfh = skb->data; + u64 timestamp; + u64 src_port; + + skb_pull(skb, OCELOT_TAG_LEN); + + ocelot_xfh_get_src_port(xfh, &src_port); + if (unlikely(src_port >= ocelot->num_phys_ports)) + return false; + + ndev = ocelot_port_to_netdev(ocelot, src_port); + if (unlikely(!ndev)) + return false; + + pskb_trim(skb, skb->len - ETH_FCS_LEN); + + 
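/* At this point the extraction header (XFH) prepended by the switch has + * been pulled and the trailing FCS trimmed, so the skb now holds the bare + * Ethernet frame. Roughly: + * + * [ XFH, OCELOT_TAG_LEN bytes | Ethernet frame | FCS, 4 bytes ] + * becomes [ Ethernet frame ] + * + * with the source port already decoded from the saved xfh pointer, and + * the RX timestamp still read from it below when PTP is enabled. + */ + 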
skb->dev = ndev; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + if (ocelot->ptp) { + ocelot_xfh_get_rew_val(xfh, ×tamp); + ocelot_ptp_rx_timestamp(ocelot, skb, timestamp); + } + + if (likely(!skb_defer_rx_timestamp(skb))) + netif_receive_skb(skb); + + return true; +} + +static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + struct ocelot_fdma_dcb *dcb; + struct sk_buff *skb; + int work_done = 0; + int cleaned_cnt; + u32 stat; + u16 idx; + + cleaned_cnt = ocelot_fdma_rx_ring_free(fdma); + rx_ring = &fdma->rx_ring; + skb = rx_ring->skb; + + while (budget--) { + idx = rx_ring->next_to_clean; + dcb = &rx_ring->dcbs[idx]; + stat = dcb->stat; + if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0) + break; + + /* New packet is a start of frame but we already got a skb set, + * we probably lost an EOF packet, free skb + */ + if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) { + dev_kfree_skb(skb); + skb = NULL; + } + + rxb = &rx_ring->bufs[idx]; + /* Fetch next to clean buffer from the rx_ring */ + skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb); + if (unlikely(!skb)) + break; + + work_done++; + cleaned_cnt++; + + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + rx_ring->next_to_clean = idx; + + if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT || + stat & MSCC_FDMA_DCB_STAT_PD)) { + dev_err_ratelimited(ocelot->dev, + "DCB aborted or pruned\n"); + dev_kfree_skb(skb); + skb = NULL; + continue; + } + + /* We still need to process the other fragment of the packet + * before delivering it to the network stack + */ + if (!(stat & MSCC_FDMA_DCB_STAT_EOF)) + continue; + + if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb))) + dev_kfree_skb(skb); + + skb = NULL; + } + + rx_ring->skb = skb; + + if (cleaned_cnt) + ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt); + + return work_done; +} + +static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot) +{ + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; + struct net_device *dev; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_port = ocelot->ports[port]; + if (!ocelot_port) + continue; + priv = container_of(ocelot_port, struct ocelot_port_private, + port); + dev = priv->dev; + + if (unlikely(netif_queue_stopped(dev))) + netif_wake_queue(dev); + } +} + +static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_tx_ring *tx_ring; + struct ocelot_fdma_tx_buf *buf; + unsigned int new_null_llp_idx; + struct ocelot_fdma_dcb *dcb; + bool end_of_list = false; + struct sk_buff *skb; + dma_addr_t dma; + u32 dcb_llp; + u16 ntc; + int ret; + + tx_ring = &fdma->tx_ring; + + /* Purge the TX packets that have been sent up to the NULL llp or the + * end of done list. 
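+ * A DCB is done once the hardware has set MSCC_FDMA_DCB_STAT_PD in its + * status word; each pass below unmaps and frees the matching skb, and the + * walk stops at the first DCB still owned by the FDMA or at a NULL LLP, + * after which the channel may need to be re-armed from next_to_clean.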
+ */ + while (!ocelot_fdma_tx_ring_empty(fdma)) { + ntc = tx_ring->next_to_clean; + dcb = &tx_ring->dcbs[ntc]; + if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD)) + break; + + buf = &tx_ring->bufs[ntc]; + skb = buf->skb; + dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr), + skb->len, DMA_TO_DEVICE); + napi_consume_skb(skb, budget); + dcb_llp = dcb->llp; + + /* Only update after accessing all dcb fields */ + tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc, + OCELOT_FDMA_TX_RING_SIZE); + + /* If we hit the NULL LLP, stop, we might need to reload FDMA */ + if (dcb_llp == 0) { + end_of_list = true; + break; + } + } + + /* No need to try to wake if there were no TX cleaned_cnt up. */ + if (ocelot_fdma_tx_ring_free(fdma)) + ocelot_fdma_wakeup_netdev(ocelot); + + /* If there is still some DCBs to be processed by the FDMA or if the + * pending list is empty, there is no need to restart the FDMA. + */ + if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma)) + return; + + ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN); + if (ret) { + dev_warn(ocelot->dev, + "Failed to wait for TX channel to stop\n"); + return; + } + + /* Set NULL LLP to be the last DCB used */ + new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use, + OCELOT_FDMA_TX_RING_SIZE); + dcb = &tx_ring->dcbs[new_null_llp_idx]; + dcb->llp = 0; + + dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean); + ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN); +} + +static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget) +{ + struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi); + struct ocelot *ocelot = fdma->ocelot; + int work_done = 0; + bool rx_stopped; + + ocelot_fdma_tx_cleanup(ocelot, budget); + + rx_stopped = ocelot_fdma_check_stop_rx(ocelot); + + work_done = ocelot_fdma_rx_get(ocelot, budget); + + if (rx_stopped) + ocelot_fdma_rx_restart(ocelot); + + if (work_done < budget) { + napi_complete_done(&fdma->napi, work_done); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | + BIT(MSCC_FDMA_XTR_CHAN)); + } + + return work_done; +} + +static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id) +{ + u32 ident, llp, frm, err, err_code; + struct ocelot *ocelot = dev_id; + + ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT); + frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM); + llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident); + if (frm || llp) { + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + napi_schedule(&ocelot->fdma->napi); + } + + err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR); + if (unlikely(err)) { + err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE); + dev_err_ratelimited(ocelot->dev, + "Error ! 
chans mask: %#x, code: %#x\n", + err, err_code); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err); + ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code); + } + + return IRQ_HANDLED; +} + +static void ocelot_fdma_send_skb(struct ocelot *ocelot, + struct ocelot_fdma *fdma, struct sk_buff *skb) +{ + struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; + struct ocelot_fdma_tx_buf *tx_buf; + struct ocelot_fdma_dcb *dcb; + dma_addr_t dma; + u16 next_idx; + + dcb = &tx_ring->dcbs[tx_ring->next_to_use]; + tx_buf = &tx_ring->bufs[tx_ring->next_to_use]; + if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) { + dev_kfree_skb_any(skb); + return; + } + + next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use, + OCELOT_FDMA_TX_RING_SIZE); + skb_tx_timestamp(skb); + + /* If the FDMA TX chan is empty, then enqueue the DCB directly */ + if (ocelot_fdma_tx_ring_empty(fdma)) { + dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, + tx_ring->next_to_use); + ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN); + } else { + /* Chain the DCBs */ + dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx); + } + + tx_ring->next_to_use = next_idx; +} + +static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev) +{ + int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0); + int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); + void *ifh; + int err; + + if (unlikely(needed_headroom || needed_tailroom || + skb_header_cloned(skb))) { + err = pskb_expand_head(skb, needed_headroom, needed_tailroom, + GFP_ATOMIC); + if (unlikely(err)) { + dev_kfree_skb_any(skb); + return 1; + } + } + + err = skb_linearize(skb); + if (err) { + net_err_ratelimited("%s: skb_linearize error (%d)!\n", + dev->name, err); + dev_kfree_skb_any(skb); + return 1; + } + + ifh = skb_push(skb, OCELOT_TAG_LEN); + skb_put(skb, ETH_FCS_LEN); + memset(ifh, 0, OCELOT_TAG_LEN); + ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb)); + + return 0; +} + +int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + int ret = NETDEV_TX_OK; + + spin_lock(&fdma->tx_ring.xmit_lock); + + if (ocelot_fdma_tx_ring_free(fdma) == 0) { + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + goto out; + } + + if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev)) + goto out; + + ocelot_fdma_send_skb(ocelot, fdma, skb); + +out: + spin_unlock(&fdma->tx_ring.xmit_lock); + + return ret; +} + +static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_rx_ring *rx_ring; + struct ocelot_fdma_rx_buf *rxb; + u16 idx; + + rx_ring = &fdma->rx_ring; + idx = rx_ring->next_to_clean; + + /* Free the pages held in the RX ring */ + while (idx != rx_ring->next_to_use) { + rxb = &rx_ring->bufs[idx]; + dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(rxb->page); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); + } + + if (fdma->rx_ring.skb) + dev_kfree_skb_any(fdma->rx_ring.skb); +} + +static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_tx_ring *tx_ring; + struct ocelot_fdma_tx_buf *txb; + struct sk_buff *skb; + u16 idx; + + tx_ring = &fdma->tx_ring; + idx = tx_ring->next_to_clean; + + while (idx != tx_ring->next_to_use) { + txb = &tx_ring->bufs[idx]; + skb = 
txb->skb; + dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE); + } +} + +static int ocelot_fdma_rings_alloc(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + struct ocelot_fdma_dcb *dcbs; + unsigned int adjust; + dma_addr_t dcbs_dma; + int ret; + + /* Create a pool of consistent memory blocks for hardware descriptors */ + fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev, + OCELOT_DCBS_HW_ALLOC_SIZE, + &fdma->dcbs_dma_base, GFP_KERNEL); + if (!fdma->dcbs_base) + return -ENOMEM; + + /* DCBs must be aligned on a 32bit boundary */ + dcbs = fdma->dcbs_base; + dcbs_dma = fdma->dcbs_dma_base; + if (!IS_ALIGNED(dcbs_dma, 4)) { + adjust = dcbs_dma & 0x3; + dcbs_dma = ALIGN(dcbs_dma, 4); + dcbs = (void *)dcbs + adjust; + } + + /* TX queue */ + fdma->tx_ring.dcbs = dcbs; + fdma->tx_ring.dcbs_dma = dcbs_dma; + spin_lock_init(&fdma->tx_ring.xmit_lock); + + /* RX queue */ + fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE; + fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE; + ret = ocelot_fdma_alloc_rx_buffs(ocelot, + ocelot_fdma_tx_ring_free(fdma)); + if (ret) { + ocelot_fdma_free_rx_ring(ocelot); + return ret; + } + + /* Set the last DCB LLP as NULL, this is normally done when restarting + * the RX chan, but this is for the first run + */ + ocelot_fdma_rx_set_llp(&fdma->rx_ring); + + return 0; +} + +void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + dev->needed_headroom = OCELOT_TAG_LEN; + dev->needed_tailroom = ETH_FCS_LEN; + + if (fdma->ndev) + return; + + fdma->ndev = dev; + netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll, + OCELOT_FDMA_WEIGHT); +} + +void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + if (fdma->ndev == dev) { + netif_napi_del(&fdma->napi); + fdma->ndev = NULL; + } +} + +void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot) +{ + struct device *dev = ocelot->dev; + struct ocelot_fdma *fdma; + int ret; + + fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL); + if (!fdma) + return; + + ocelot->fdma = fdma; + ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + + fdma->ocelot = ocelot; + fdma->irq = platform_get_irq_byname(pdev, "fdma"); + ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0, + dev_name(dev), ocelot); + if (ret) + goto err_free_fdma; + + ret = ocelot_fdma_rings_alloc(ocelot); + if (ret) + goto err_free_irq; + + static_branch_enable(&ocelot_fdma_enabled); + + return; + +err_free_irq: + devm_free_irq(dev, fdma->irq, fdma); +err_free_fdma: + devm_kfree(dev, fdma); + + ocelot->fdma = NULL; +} + +void ocelot_fdma_start(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + /* Reconfigure for extraction and injection using DMA */ + ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0); + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0); + + ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff); + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN)); + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA, + BIT(MSCC_FDMA_XTR_CHAN)); + 
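/* The bring-up order below matters: the per-source LLP/FRM masks were + * enabled above, the global interrupt enable comes next, and the + * extraction channel is only activated, pointing at the base of the RX + * DCB chain, after NAPI is enabled, so no interrupt fires before there + * is a poller and a valid ring to service. + */ + 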
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, + BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN)); + + napi_enable(&fdma->napi); + + ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma, + MSCC_FDMA_XTR_CHAN); +} + +void ocelot_fdma_deinit(struct ocelot *ocelot) +{ + struct ocelot_fdma *fdma = ocelot->fdma; + + ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS, + BIT(MSCC_FDMA_XTR_CHAN)); + ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS, + BIT(MSCC_FDMA_INJ_CHAN)); + napi_synchronize(&fdma->napi); + napi_disable(&fdma->napi); + + ocelot_fdma_free_rx_ring(ocelot); + ocelot_fdma_free_tx_ring(ocelot); +} diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.h b/drivers/net/ethernet/mscc/ocelot_fdma.h new file mode 100644 index 000000000000..2fc8e1dd7230 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_fdma.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi SoCs FDMA driver + * + * Copyright (c) 2021 Microchip + */ +#ifndef _MSCC_OCELOT_FDMA_H_ +#define _MSCC_OCELOT_FDMA_H_ + +#include "ocelot.h" + +#define MSCC_FDMA_DCB_STAT_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define MSCC_FDMA_DCB_STAT_BLOCKO_M GENMASK(31, 20) +#define MSCC_FDMA_DCB_STAT_BLOCKO_X(x) (((x) & GENMASK(31, 20)) >> 20) +#define MSCC_FDMA_DCB_STAT_PD BIT(19) +#define MSCC_FDMA_DCB_STAT_ABORT BIT(18) +#define MSCC_FDMA_DCB_STAT_EOF BIT(17) +#define MSCC_FDMA_DCB_STAT_SOF BIT(16) +#define MSCC_FDMA_DCB_STAT_BLOCKL_M GENMASK(15, 0) +#define MSCC_FDMA_DCB_STAT_BLOCKL(x) ((x) & GENMASK(15, 0)) + +#define MSCC_FDMA_DCB_LLP(x) ((x) * 4 + 0x0) +#define MSCC_FDMA_DCB_LLP_PREV(x) ((x) * 4 + 0xA0) +#define MSCC_FDMA_CH_SAFE 0xcc +#define MSCC_FDMA_CH_ACTIVATE 0xd0 +#define MSCC_FDMA_CH_DISABLE 0xd4 +#define MSCC_FDMA_CH_FORCEDIS 0xd8 +#define MSCC_FDMA_EVT_ERR 0x164 +#define MSCC_FDMA_EVT_ERR_CODE 0x168 +#define MSCC_FDMA_INTR_LLP 0x16c +#define MSCC_FDMA_INTR_LLP_ENA 0x170 +#define MSCC_FDMA_INTR_FRM 0x174 +#define MSCC_FDMA_INTR_FRM_ENA 0x178 +#define MSCC_FDMA_INTR_ENA 0x184 +#define MSCC_FDMA_INTR_IDENT 0x188 + +#define MSCC_FDMA_INJ_CHAN 2 +#define MSCC_FDMA_XTR_CHAN 0 + +#define OCELOT_FDMA_WEIGHT 32 + +#define OCELOT_FDMA_CH_SAFE_TIMEOUT_US 10 + +#define OCELOT_FDMA_RX_RING_SIZE 512 +#define OCELOT_FDMA_TX_RING_SIZE 128 + +#define OCELOT_FDMA_RX_DCB_SIZE (OCELOT_FDMA_RX_RING_SIZE * \ + sizeof(struct ocelot_fdma_dcb)) +#define OCELOT_FDMA_TX_DCB_SIZE (OCELOT_FDMA_TX_RING_SIZE * \ + sizeof(struct ocelot_fdma_dcb)) +/* +4 allows for word alignment after allocation */ +#define OCELOT_DCBS_HW_ALLOC_SIZE (OCELOT_FDMA_RX_DCB_SIZE + \ + OCELOT_FDMA_TX_DCB_SIZE + \ + 4) + +#define OCELOT_FDMA_RX_SIZE (PAGE_SIZE / 2) + +#define OCELOT_FDMA_SKBFRAG_OVR (4 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define OCELOT_FDMA_RXB_SIZE ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4) +#define OCELOT_FDMA_SKBFRAG_SIZE (OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR) + +DECLARE_STATIC_KEY_FALSE(ocelot_fdma_enabled); + +struct ocelot_fdma_dcb { + u32 llp; + u32 datap; + u32 datal; + u32 stat; +} __packed; + +/** + * struct ocelot_fdma_tx_buf - TX buffer structure + * @skb: SKB currently used in the corresponding DCB. + * @dma_addr: SKB DMA mapped address. 
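+ * + * One ocelot_fdma_tx_buf shadows each TX DCB so that the cleanup path can + * unmap the buffer and free the skb once the hardware marks the DCB + * processed.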
+ */ +struct ocelot_fdma_tx_buf { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); +}; + +/** + * struct ocelot_fdma_tx_ring - TX ring description of DCBs + * + * @dcbs: DCBs allocated for the ring + * @dcbs_dma: DMA base address of the DCBs + * @bufs: List of TX buffers associated with the DCBs + * @xmit_lock: lock for concurrent xmit access + * @next_to_clean: Next DCB to be cleaned in tx_cleanup + * @next_to_use: Next available DCB to send SKB + */ +struct ocelot_fdma_tx_ring { + struct ocelot_fdma_dcb *dcbs; + dma_addr_t dcbs_dma; + struct ocelot_fdma_tx_buf bufs[OCELOT_FDMA_TX_RING_SIZE]; + /* Protect concurrent xmit calls */ + spinlock_t xmit_lock; + u16 next_to_clean; + u16 next_to_use; +}; + +/** + * struct ocelot_fdma_rx_buf - RX buffer structure + * @page: Struct page used in this buffer + * @page_offset: Current page offset (either 0 or PAGE_SIZE/2) + * @dma_addr: DMA address of the page + */ +struct ocelot_fdma_rx_buf { + struct page *page; + u32 page_offset; + dma_addr_t dma_addr; +}; + +/** + * struct ocelot_fdma_rx_ring - RX ring description of DCBs + * + * @dcbs: DCBs allocated for the ring + * @dcbs_dma: DMA base address of the DCBs + * @bufs: List of RX buffers associated with the DCBs + * @skb: SKB currently received by the netdev + * @next_to_clean: Next DCB to be cleaned by NAPI polling + * @next_to_use: Next available DCB to be handed back to the hardware + * @next_to_alloc: Next buffer that needs to be allocated (page reuse or alloc) + */ +struct ocelot_fdma_rx_ring { + struct ocelot_fdma_dcb *dcbs; + dma_addr_t dcbs_dma; + struct ocelot_fdma_rx_buf bufs[OCELOT_FDMA_RX_RING_SIZE]; + struct sk_buff *skb; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; +}; + +/** + * struct ocelot_fdma - FDMA context + * + * @irq: FDMA interrupt + * @ndev: Net device used to initialize NAPI + * @dcbs_base: Memory coherent DCBs + * @dcbs_dma_base: DMA base address of memory coherent DCBs + * @tx_ring: Injection ring + * @rx_ring: Extraction ring + * @napi: NAPI context + * @ocelot: Back-pointer to ocelot struct + */ +struct ocelot_fdma { + int irq; + struct net_device *ndev; + struct ocelot_fdma_dcb *dcbs_base; + dma_addr_t dcbs_dma_base; + struct ocelot_fdma_tx_ring tx_ring; + struct ocelot_fdma_rx_ring rx_ring; + struct napi_struct napi; + struct ocelot *ocelot; +}; + +void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot); +void ocelot_fdma_start(struct ocelot *ocelot); +void ocelot_fdma_deinit(struct ocelot *ocelot); +int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op, + struct sk_buff *skb, struct net_device *dev); +void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev); +void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, + struct net_device *dev); + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 769a8159373e..beb9379424c0 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -20,6 +20,9 @@ (1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP) #define VCAP_IS2_CHAIN(lookup, pag) \ (2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag)) +/* PSFP chain and block ID */ +#define PSFP_BLOCK_ID OCELOT_NUM_VCAP_BLOCKS +#define OCELOT_PSFP_CHAIN (3 * VCAP_BLOCK) static int ocelot_chain_to_block(int chain, bool ingress) { @@ -46,6 +49,9 @@ static int ocelot_chain_to_block(int chain, bool ingress) if (chain == VCAP_IS2_CHAIN(lookup, pag)) return VCAP_IS2; + if (chain == OCELOT_PSFP_CHAIN) + return PSFP_BLOCK_ID; + return -EOPNOTSUPP; } @@ 
-84,7 +90,8 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain, goto_target == VCAP_IS1_CHAIN(1) || goto_target == VCAP_IS1_CHAIN(2) || goto_target == VCAP_IS2_CHAIN(0, 0) || - goto_target == VCAP_IS2_CHAIN(1, 0)); + goto_target == VCAP_IS2_CHAIN(1, 0) || + goto_target == OCELOT_PSFP_CHAIN); if (chain == VCAP_IS1_CHAIN(0)) return (goto_target == VCAP_IS1_CHAIN(1)); @@ -111,7 +118,11 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain, if (chain == VCAP_IS2_CHAIN(0, pag)) return (goto_target == VCAP_IS2_CHAIN(1, pag)); - /* VCAP IS2 lookup 1 cannot jump anywhere */ + /* VCAP IS2 lookup 1 can goto to PSFP block if hardware support */ + for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++) + if (chain == VCAP_IS2_CHAIN(1, pag)) + return (goto_target == OCELOT_PSFP_CHAIN); + return false; } @@ -211,6 +222,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, const struct flow_action_entry *a; enum ocelot_tag_tpid_sel tpid; int i, chain, egress_port; + u32 pol_ix, pol_max; u64 rate; int err; @@ -269,10 +281,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->type = OCELOT_VCAP_FILTER_OFFLOAD; break; case FLOW_ACTION_POLICE: + if (filter->block_id == PSFP_BLOCK_ID) { + filter->type = OCELOT_PSFP_FILTER_OFFLOAD; + break; + } if (filter->block_id != VCAP_IS2 || filter->lookup != 0) { NL_SET_ERR_MSG_MOD(extack, - "Police action can only be offloaded to VCAP IS2 lookup 0"); + "Police action can only be offloaded to VCAP IS2 lookup 0 or PSFP"); return -EOPNOTSUPP; } if (filter->goto_target != -1) { @@ -286,6 +302,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, return -EOPNOTSUPP; } filter->action.police_ena = true; + + pol_ix = a->hw_index + ocelot->vcap_pol.base; + pol_max = ocelot->vcap_pol.max; + + if (ocelot->vcap_pol.max2 && pol_ix > pol_max) { + pol_ix += ocelot->vcap_pol.base2 - pol_max - 1; + pol_max = ocelot->vcap_pol.max2; + } + + if (pol_ix >= pol_max) + return -EINVAL; + + filter->action.pol_ix = pol_ix; + rate = a->police.rate_bytes_ps; filter->action.pol.rate = div_u64(rate, 1000) * 8; filter->action.pol.burst = a->police.burst; @@ -399,6 +429,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->action.pcp_a_val = a->vlan.prio; filter->type = OCELOT_VCAP_FILTER_OFFLOAD; break; + case FLOW_ACTION_GATE: + if (filter->block_id != PSFP_BLOCK_ID) { + NL_SET_ERR_MSG_MOD(extack, + "Gate action can only be offloaded to PSFP chain"); + return -EOPNOTSUPP; + } + filter->type = OCELOT_PSFP_FILTER_OFFLOAD; + break; default: NL_SET_ERR_MSG_MOD(extack, "Cannot offload action"); return -EOPNOTSUPP; @@ -407,7 +445,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, if (filter->goto_target == -1) { if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) || - chain == 0) { + chain == 0 || filter->block_id == PSFP_BLOCK_ID) { allow_missing_goto_target = true; } else { NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action"); @@ -689,6 +727,10 @@ static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress, if (ret) return ret; + /* PSFP filter need to parse key by stream identification function. 
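+ * The key will instead be parsed by the switch-specific PSFP stream + * identification code behind ocelot->ops, hence the early return below + * that skips ocelot_flower_parse_key().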
*/ + if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) + return 0; + return ocelot_flower_parse_key(ocelot, port, ingress, f, filter); } @@ -792,6 +834,15 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, if (filter->type == OCELOT_VCAP_FILTER_DUMMY) return ocelot_vcap_dummy_filter_add(ocelot, filter); + if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) { + kfree(filter); + if (ocelot->ops->psfp_filter_add) + return ocelot->ops->psfp_filter_add(ocelot, port, f); + + NL_SET_ERR_MSG_MOD(extack, "PSFP chain is not supported in HW"); + return -EOPNOTSUPP; + } + return ocelot_vcap_filter_add(ocelot, filter, f->common.extack); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace); @@ -807,6 +858,13 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, if (block_id < 0) return 0; + if (block_id == PSFP_BLOCK_ID) { + if (ocelot->ops->psfp_filter_del) + return ocelot->ops->psfp_filter_del(ocelot, f); + + return -EOPNOTSUPP; + } + block = &ocelot->block[block_id]; filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true); @@ -825,12 +883,25 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, { struct ocelot_vcap_filter *filter; struct ocelot_vcap_block *block; + struct flow_stats stats = {0}; int block_id, ret; block_id = ocelot_chain_to_block(f->common.chain_index, ingress); if (block_id < 0) return 0; + if (block_id == PSFP_BLOCK_ID) { + if (ocelot->ops->psfp_stats_get) { + ret = ocelot->ops->psfp_stats_get(ocelot, f, &stats); + if (ret) + return ret; + + goto stats_update; + } + + return -EOPNOTSUPP; + } + block = &ocelot->block[block_id]; filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true); @@ -841,7 +912,10 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, if (ret) return ret; - flow_stats_update(&f->stats, 0x0, filter->stats.pkts, 0, 0x0, + stats.pkts = filter->stats.pkts; + +stats_update: + flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 0x0, FLOW_ACTION_HW_STATS_IMMEDIATE); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index eaeba60b1bba..8115c3db252e 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -15,6 +15,7 @@ #include <net/pkt_cls.h> #include "ocelot.h" #include "ocelot_vcap.h" +#include "ocelot_fdma.h" #define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP @@ -457,7 +458,8 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) int port = priv->chip_port; u32 rew_op = 0; - if (!ocelot_can_inject(ocelot, 0)) + if (!static_branch_unlikely(&ocelot_fdma_enabled) && + !ocelot_can_inject(ocelot, 0)) return NETDEV_TX_BUSY; /* Check if timestamping is needed */ @@ -475,9 +477,13 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) rew_op = ocelot_ptp_rew_op(skb); } - ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); + if (static_branch_unlikely(&ocelot_fdma_enabled)) { + ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev); + } else { + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); - kfree_skb(skb); + consume_skb(skb); + } return NETDEV_TX_OK; } @@ -764,10 +770,23 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return phy_mii_ioctl(dev->phydev, ifr, cmd); } +static int ocelot_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + + 
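/* ocelot_port_set_maxlen() converts the L3 MTU into the on-wire frame + * length enforced by the switch, adding Ethernet header and FCS overhead + * on top of new_mtu. dev->mtu itself is stored with WRITE_ONCE() since + * fast-path readers may load it locklessly and pair with READ_ONCE(). + * With dev->max_mtu raised to OCELOT_JUMBO_MTU, jumbo frames can then be + * enabled from userspace with e.g. "ip link set dev <port> mtu 9000". + */ + 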
ocelot_port_set_maxlen(ocelot, priv->chip_port, new_mtu); + WRITE_ONCE(dev->mtu, new_mtu); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, .ndo_start_xmit = ocelot_port_xmit, + .ndo_change_mtu = ocelot_change_mtu, .ndo_set_rx_mode = ocelot_set_rx_mode, .ndo_set_mac_address = ocelot_port_set_mac_address, .ndo_get_stats64 = ocelot_get_stats64, @@ -1498,40 +1517,6 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { .notifier_call = ocelot_switchdev_blocking_event, }; -static void vsc7514_phylink_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct net_device *ndev = to_net_dev(config->dev); - struct ocelot_port_private *priv = netdev_priv(ndev); - struct ocelot_port *ocelot_port = &priv->port; - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = {}; - - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { - linkmode_zero(supported); - return; - } - - phylink_set_port_modes(mask); - - phylink_set(mask, Pause); - phylink_set(mask, Autoneg); - phylink_set(mask, Asym_Pause); - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 1000baseT_Half); - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -} - static void vsc7514_phylink_mac_config(struct phylink_config *config, unsigned int link_an_mode, const struct phylink_link_state *state) @@ -1590,7 +1575,7 @@ static void vsc7514_phylink_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops ocelot_phylink_ops = { - .validate = vsc7514_phylink_validate, + .validate = phylink_generic_validate, .mac_config = vsc7514_phylink_mac_config, .mac_link_down = vsc7514_phylink_mac_link_down, .mac_link_up = vsc7514_phylink_mac_link_up, @@ -1654,6 +1639,11 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port, priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; + priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + + __set_bit(ocelot_port->phy_mode, + priv->phylink_config.supported_interfaces); phylink = phylink_create(&priv->phylink_config, of_fwnode_handle(portnp), @@ -1699,12 +1689,16 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, dev->netdev_ops = &ocelot_port_netdev_ops; dev->ethtool_ops = &ocelot_ethtool_ops; + dev->max_mtu = OCELOT_JUMBO_MTU; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS | NETIF_F_HW_TC; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; - eth_hw_addr_gen(dev, ocelot->base_mac, port); + err = of_get_ethdev_address(portnp, dev); + if (err) + eth_hw_addr_gen(dev, ocelot->base_mac, port); + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED); @@ -1714,14 +1708,20 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, if (err) goto out; + if (ocelot->fdma) + ocelot_fdma_netdev_init(ocelot, dev); + err = register_netdev(dev); if (err) { dev_err(ocelot->dev, "register_netdev failed\n"); - goto out; + goto out_fdma_deinit; } return 0; +out_fdma_deinit: + if 
(ocelot->fdma) + ocelot_fdma_netdev_deinit(ocelot, dev); out: ocelot->ports[port] = NULL; free_netdev(dev); @@ -1734,9 +1734,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port) struct ocelot_port_private *priv = container_of(ocelot_port, struct ocelot_port_private, port); + struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_fdma *fdma = ocelot->fdma; unregister_netdev(priv->dev); + if (fdma) + ocelot_fdma_netdev_deinit(ocelot, priv->dev); + if (priv->phylink) { rtnl_lock(); phylink_disconnect_phy(priv->phylink); diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 99d7376a70a7..d3544413a8a4 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -887,10 +887,18 @@ static void vcap_entry_set(struct ocelot *ocelot, int ix, return es0_entry_set(ocelot, ix, filter); } -static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol) +struct vcap_policer_entry { + struct list_head list; + refcount_t refcount; + u32 pol_ix; +}; + +int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol) { struct qos_policer_conf pp = { 0 }; + struct vcap_policer_entry *tmp; + int ret; if (!pol) return -EINVAL; @@ -899,57 +907,74 @@ static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, pp.pir = pol->rate; pp.pbs = pol->burst; - return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + list_for_each_entry(tmp, &ocelot->vcap_pol.pol_list, list) + if (tmp->pol_ix == pol_ix) { + refcount_inc(&tmp->refcount); + return 0; + } + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + if (ret) { + kfree(tmp); + return ret; + } + + tmp->pol_ix = pol_ix; + refcount_set(&tmp->refcount, 1); + list_add_tail(&tmp->list, &ocelot->vcap_pol.pol_list); + + return 0; } +EXPORT_SYMBOL(ocelot_vcap_policer_add); -static void ocelot_vcap_policer_del(struct ocelot *ocelot, - struct ocelot_vcap_block *block, - u32 pol_ix) +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix) { - struct ocelot_vcap_filter *filter; struct qos_policer_conf pp = {0}; - int index = -1; - - if (pol_ix < block->pol_lpr) - return; - - list_for_each_entry(filter, &block->rules, list) { - index++; - if (filter->block_id == VCAP_IS2 && - filter->action.police_ena && - filter->action.pol_ix < pol_ix) { - filter->action.pol_ix += 1; - ocelot_vcap_policer_add(ocelot, filter->action.pol_ix, - &filter->action.pol); - is2_entry_set(ocelot, index, filter); + struct vcap_policer_entry *tmp, *n; + u8 z = 0; + + list_for_each_entry_safe(tmp, n, &ocelot->vcap_pol.pol_list, list) + if (tmp->pol_ix == pol_ix) { + z = refcount_dec_and_test(&tmp->refcount); + if (z) { + list_del(&tmp->list); + kfree(tmp); + } } - } - pp.mode = MSCC_QOS_RATE_MODE_DISABLED; - qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + if (z) { + pp.mode = MSCC_QOS_RATE_MODE_DISABLED; + return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + } - block->pol_lpr++; + return 0; } +EXPORT_SYMBOL(ocelot_vcap_policer_del); -static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, - struct ocelot_vcap_block *block, - struct ocelot_vcap_filter *filter) +static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { struct ocelot_vcap_filter *tmp; struct list_head *pos, *n; + int ret; if (filter->block_id == VCAP_IS2 && filter->action.police_ena) { - 
block->pol_lpr--; - filter->action.pol_ix = block->pol_lpr; - ocelot_vcap_policer_add(ocelot, filter->action.pol_ix, - &filter->action.pol); + ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix, + &filter->action.pol); + if (ret) + return ret; } block->count++; if (list_empty(&block->rules)) { list_add(&filter->list, &block->rules); - return; + return 0; } list_for_each_safe(pos, n, &block->rules) { @@ -958,6 +983,8 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, break; } list_add(&filter->list, pos->prev); + + return 0; } static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a, @@ -1132,7 +1159,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot, struct netlink_ext_ack *extack) { struct ocelot_vcap_block *block = &ocelot->block[filter->block_id]; - int i, index; + int i, index, ret; if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) { NL_SET_ERR_MSG_MOD(extack, @@ -1141,7 +1168,9 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot, } /* Add filter to the linked list */ - ocelot_vcap_filter_add_to_block(ocelot, block, filter); + ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter); + if (ret) + return ret; /* Get the index of the inserted filter */ index = ocelot_vcap_block_get_filter_index(block, filter); @@ -1174,7 +1203,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, if (ocelot_vcap_filter_equal(filter, tmp)) { if (tmp->block_id == VCAP_IS2 && tmp->action.police_ena) - ocelot_vcap_policer_del(ocelot, block, + ocelot_vcap_policer_del(ocelot, tmp->action.pol_ix); list_del(pos); @@ -1217,6 +1246,22 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot, } EXPORT_SYMBOL(ocelot_vcap_filter_del); +int ocelot_vcap_filter_replace(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) +{ + struct ocelot_vcap_block *block = &ocelot->block[filter->block_id]; + int index; + + index = ocelot_vcap_block_get_filter_index(block, filter); + if (index < 0) + return index; + + vcap_entry_set(ocelot, index, filter); + + return 0; +} +EXPORT_SYMBOL(ocelot_vcap_filter_replace); + int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, struct ocelot_vcap_filter *filter) { @@ -1350,13 +1395,13 @@ int ocelot_vcap_init(struct ocelot *ocelot) struct vcap_props *vcap = &ocelot->vcap[i]; INIT_LIST_HEAD(&block->rules); - block->pol_lpr = OCELOT_POLICER_DISCARD - 1; ocelot_vcap_detect_constants(ocelot, vcap); ocelot_vcap_init_one(ocelot, vcap); } INIT_LIST_HEAD(&ocelot->dummy_rules); + INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 38103b0255b0..4f4a495a60ad 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -18,313 +18,24 @@ #include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot_hsio.h> +#include <soc/mscc/vsc7514_regs.h> +#include "ocelot_fdma.h" #include "ocelot.h" -static const u32 ocelot_ana_regmap[] = { - REG(ANA_ADVLEARN, 0x009000), - REG(ANA_VLANMASK, 0x009004), - REG(ANA_PORT_B_DOMAIN, 0x009008), - REG(ANA_ANAGEFIL, 0x00900c), - REG(ANA_ANEVENTS, 0x009010), - REG(ANA_STORMLIMIT_BURST, 0x009014), - REG(ANA_STORMLIMIT_CFG, 0x009018), - REG(ANA_ISOLATED_PORTS, 0x009028), - REG(ANA_COMMUNITY_PORTS, 0x00902c), - REG(ANA_AUTOAGE, 0x009030), - REG(ANA_MACTOPTIONS, 0x009034), - REG(ANA_LEARNDISC, 0x009038), - REG(ANA_AGENCTRL, 0x00903c), - REG(ANA_MIRRORPORTS, 0x009040), - REG(ANA_EMIRRORPORTS, 0x009044), - REG(ANA_FLOODING, 0x009048), - 
REG(ANA_FLOODING_IPMC, 0x00904c), - REG(ANA_SFLOW_CFG, 0x009050), - REG(ANA_PORT_MODE, 0x009080), - REG(ANA_PGID_PGID, 0x008c00), - REG(ANA_TABLES_ANMOVED, 0x008b30), - REG(ANA_TABLES_MACHDATA, 0x008b34), - REG(ANA_TABLES_MACLDATA, 0x008b38), - REG(ANA_TABLES_MACACCESS, 0x008b3c), - REG(ANA_TABLES_MACTINDX, 0x008b40), - REG(ANA_TABLES_VLANACCESS, 0x008b44), - REG(ANA_TABLES_VLANTIDX, 0x008b48), - REG(ANA_TABLES_ISDXACCESS, 0x008b4c), - REG(ANA_TABLES_ISDXTIDX, 0x008b50), - REG(ANA_TABLES_ENTRYLIM, 0x008b00), - REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), - REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), - REG(ANA_MSTI_STATE, 0x008e00), - REG(ANA_PORT_VLAN_CFG, 0x007000), - REG(ANA_PORT_DROP_CFG, 0x007004), - REG(ANA_PORT_QOS_CFG, 0x007008), - REG(ANA_PORT_VCAP_CFG, 0x00700c), - REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), - REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), - REG(ANA_PORT_PCP_DEI_MAP, 0x007020), - REG(ANA_PORT_CPU_FWD_CFG, 0x007060), - REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), - REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), - REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), - REG(ANA_PORT_PORT_CFG, 0x007070), - REG(ANA_PORT_POL_CFG, 0x007074), - REG(ANA_PORT_PTP_CFG, 0x007078), - REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), - REG(ANA_OAM_UPM_LM_CNT, 0x007c00), - REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), - REG(ANA_PFC_PFC_CFG, 0x008800), - REG(ANA_PFC_PFC_TIMER, 0x008804), - REG(ANA_IPT_OAM_MEP_CFG, 0x008000), - REG(ANA_IPT_IPT, 0x008004), - REG(ANA_PPT_PPT, 0x008ac0), - REG(ANA_FID_MAP_FID_MAP, 0x000000), - REG(ANA_AGGR_CFG, 0x0090b4), - REG(ANA_CPUQ_CFG, 0x0090b8), - REG(ANA_CPUQ_CFG2, 0x0090bc), - REG(ANA_CPUQ_8021_CFG, 0x0090c0), - REG(ANA_DSCP_CFG, 0x009100), - REG(ANA_DSCP_REWR_CFG, 0x009200), - REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), - REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), - REG(ANA_VRAP_CFG, 0x009280), - REG(ANA_VRAP_HDR_DATA, 0x009284), - REG(ANA_VRAP_HDR_MASK, 0x009288), - REG(ANA_DISCARD_CFG, 0x00928c), - REG(ANA_FID_CFG, 0x009290), - REG(ANA_POL_PIR_CFG, 0x004000), - REG(ANA_POL_CIR_CFG, 0x004004), - REG(ANA_POL_MODE_CFG, 0x004008), - REG(ANA_POL_PIR_STATE, 0x00400c), - REG(ANA_POL_CIR_STATE, 0x004010), - REG(ANA_POL_STATE, 0x004014), - REG(ANA_POL_FLOWC, 0x008b80), - REG(ANA_POL_HYST, 0x008bec), - REG(ANA_POL_MISC_CFG, 0x008bf0), -}; - -static const u32 ocelot_qs_regmap[] = { - REG(QS_XTR_GRP_CFG, 0x000000), - REG(QS_XTR_RD, 0x000008), - REG(QS_XTR_FRM_PRUNING, 0x000010), - REG(QS_XTR_FLUSH, 0x000018), - REG(QS_XTR_DATA_PRESENT, 0x00001c), - REG(QS_XTR_CFG, 0x000020), - REG(QS_INJ_GRP_CFG, 0x000024), - REG(QS_INJ_WR, 0x00002c), - REG(QS_INJ_CTRL, 0x000034), - REG(QS_INJ_STATUS, 0x00003c), - REG(QS_INJ_ERR, 0x000040), - REG(QS_INH_DBG, 0x000048), -}; - -static const u32 ocelot_qsys_regmap[] = { - REG(QSYS_PORT_MODE, 0x011200), - REG(QSYS_SWITCH_PORT_MODE, 0x011234), - REG(QSYS_STAT_CNT_CFG, 0x011264), - REG(QSYS_EEE_CFG, 0x011268), - REG(QSYS_EEE_THRES, 0x011294), - REG(QSYS_IGR_NO_SHARING, 0x011298), - REG(QSYS_EGR_NO_SHARING, 0x01129c), - REG(QSYS_SW_STATUS, 0x0112a0), - REG(QSYS_EXT_CPU_CFG, 0x0112d0), - REG(QSYS_PAD_CFG, 0x0112d4), - REG(QSYS_CPU_GROUP_MAP, 0x0112d8), - REG(QSYS_QMAP, 0x0112dc), - REG(QSYS_ISDX_SGRP, 0x011400), - REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), - REG(QSYS_TFRM_MISC, 0x011310), - REG(QSYS_TFRM_PORT_DLY, 0x011314), - REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), - REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), - REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), - REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), - REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), - REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), - REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), - 
REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), - REG(QSYS_RED_PROFILE, 0x011338), - REG(QSYS_RES_QOS_MODE, 0x011378), - REG(QSYS_RES_CFG, 0x012000), - REG(QSYS_RES_STAT, 0x012004), - REG(QSYS_EGR_DROP_MODE, 0x01137c), - REG(QSYS_EQ_CTRL, 0x011380), - REG(QSYS_EVENTS_CORE, 0x011384), - REG(QSYS_CIR_CFG, 0x000000), - REG(QSYS_EIR_CFG, 0x000004), - REG(QSYS_SE_CFG, 0x000008), - REG(QSYS_SE_DWRR_CFG, 0x00000c), - REG(QSYS_SE_CONNECT, 0x00003c), - REG(QSYS_SE_DLB_SENSE, 0x000040), - REG(QSYS_CIR_STATE, 0x000044), - REG(QSYS_EIR_STATE, 0x000048), - REG(QSYS_SE_STATE, 0x00004c), - REG(QSYS_HSCH_MISC_CFG, 0x011388), -}; - -static const u32 ocelot_rew_regmap[] = { - REG(REW_PORT_VLAN_CFG, 0x000000), - REG(REW_TAG_CFG, 0x000004), - REG(REW_PORT_CFG, 0x000008), - REG(REW_DSCP_CFG, 0x00000c), - REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), - REG(REW_PTP_CFG, 0x000050), - REG(REW_PTP_DLY1_CFG, 0x000054), - REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), - REG(REW_DSCP_REMAP_CFG, 0x000790), - REG(REW_STAT_CFG, 0x000890), - REG(REW_PPT, 0x000680), -}; - -static const u32 ocelot_sys_regmap[] = { - REG(SYS_COUNT_RX_OCTETS, 0x000000), - REG(SYS_COUNT_RX_UNICAST, 0x000004), - REG(SYS_COUNT_RX_MULTICAST, 0x000008), - REG(SYS_COUNT_RX_BROADCAST, 0x00000c), - REG(SYS_COUNT_RX_SHORTS, 0x000010), - REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), - REG(SYS_COUNT_RX_JABBERS, 0x000018), - REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), - REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), - REG(SYS_COUNT_RX_64, 0x000024), - REG(SYS_COUNT_RX_65_127, 0x000028), - REG(SYS_COUNT_RX_128_255, 0x00002c), - REG(SYS_COUNT_RX_256_1023, 0x000030), - REG(SYS_COUNT_RX_1024_1526, 0x000034), - REG(SYS_COUNT_RX_1527_MAX, 0x000038), - REG(SYS_COUNT_RX_PAUSE, 0x00003c), - REG(SYS_COUNT_RX_CONTROL, 0x000040), - REG(SYS_COUNT_RX_LONGS, 0x000044), - REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), - REG(SYS_COUNT_TX_OCTETS, 0x000100), - REG(SYS_COUNT_TX_UNICAST, 0x000104), - REG(SYS_COUNT_TX_MULTICAST, 0x000108), - REG(SYS_COUNT_TX_BROADCAST, 0x00010c), - REG(SYS_COUNT_TX_COLLISION, 0x000110), - REG(SYS_COUNT_TX_DROPS, 0x000114), - REG(SYS_COUNT_TX_PAUSE, 0x000118), - REG(SYS_COUNT_TX_64, 0x00011c), - REG(SYS_COUNT_TX_65_127, 0x000120), - REG(SYS_COUNT_TX_128_511, 0x000124), - REG(SYS_COUNT_TX_512_1023, 0x000128), - REG(SYS_COUNT_TX_1024_1526, 0x00012c), - REG(SYS_COUNT_TX_1527_MAX, 0x000130), - REG(SYS_COUNT_TX_AGING, 0x000170), - REG(SYS_RESET_CFG, 0x000508), - REG(SYS_CMID, 0x00050c), - REG(SYS_VLAN_ETYPE_CFG, 0x000510), - REG(SYS_PORT_MODE, 0x000514), - REG(SYS_FRONT_PORT_MODE, 0x000548), - REG(SYS_FRM_AGING, 0x000574), - REG(SYS_STAT_CFG, 0x000578), - REG(SYS_SW_STATUS, 0x00057c), - REG(SYS_MISC_CFG, 0x0005ac), - REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), - REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), - REG(SYS_CM_ADDR, 0x000500), - REG(SYS_CM_DATA, 0x000504), - REG(SYS_PAUSE_CFG, 0x000608), - REG(SYS_PAUSE_TOT_CFG, 0x000638), - REG(SYS_ATOP, 0x00063c), - REG(SYS_ATOP_TOT_CFG, 0x00066c), - REG(SYS_MAC_FC_CFG, 0x000670), - REG(SYS_MMGT, 0x00069c), - REG(SYS_MMGT_FAST, 0x0006a0), - REG(SYS_EVENTS_DIF, 0x0006a4), - REG(SYS_EVENTS_CORE, 0x0006b4), - REG(SYS_CNT, 0x000000), - REG(SYS_PTP_STATUS, 0x0006b8), - REG(SYS_PTP_TXSTAMP, 0x0006bc), - REG(SYS_PTP_NXT, 0x0006c0), - REG(SYS_PTP_CFG, 0x0006c4), -}; - -static const u32 ocelot_vcap_regmap[] = { - /* VCAP_CORE_CFG */ - REG(VCAP_CORE_UPDATE_CTRL, 0x000000), - REG(VCAP_CORE_MV_CFG, 0x000004), - /* VCAP_CORE_CACHE */ - REG(VCAP_CACHE_ENTRY_DAT, 0x000008), - REG(VCAP_CACHE_MASK_DAT, 0x000108), - REG(VCAP_CACHE_ACTION_DAT, 0x000208), - 
REG(VCAP_CACHE_CNT_DAT, 0x000308), - REG(VCAP_CACHE_TG_DAT, 0x000388), - /* VCAP_CONST */ - REG(VCAP_CONST_VCAP_VER, 0x000398), - REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c), - REG(VCAP_CONST_ENTRY_CNT, 0x0003a0), - REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4), - REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8), - REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac), - REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0), - REG(VCAP_CONST_CNT_WIDTH, 0x0003b4), - REG(VCAP_CONST_CORE_CNT, 0x0003b8), - REG(VCAP_CONST_IF_CNT, 0x0003bc), -}; - -static const u32 ocelot_ptp_regmap[] = { - REG(PTP_PIN_CFG, 0x000000), - REG(PTP_PIN_TOD_SEC_MSB, 0x000004), - REG(PTP_PIN_TOD_SEC_LSB, 0x000008), - REG(PTP_PIN_TOD_NSEC, 0x00000c), - REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), - REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), - REG(PTP_CFG_MISC, 0x0000a0), - REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), - REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), -}; - -static const u32 ocelot_dev_gmii_regmap[] = { - REG(DEV_CLOCK_CFG, 0x0), - REG(DEV_PORT_MISC, 0x4), - REG(DEV_EVENTS, 0x8), - REG(DEV_EEE_CFG, 0xc), - REG(DEV_RX_PATH_DELAY, 0x10), - REG(DEV_TX_PATH_DELAY, 0x14), - REG(DEV_PTP_PREDICT_CFG, 0x18), - REG(DEV_MAC_ENA_CFG, 0x1c), - REG(DEV_MAC_MODE_CFG, 0x20), - REG(DEV_MAC_MAXLEN_CFG, 0x24), - REG(DEV_MAC_TAGS_CFG, 0x28), - REG(DEV_MAC_ADV_CHK_CFG, 0x2c), - REG(DEV_MAC_IFG_CFG, 0x30), - REG(DEV_MAC_HDX_CFG, 0x34), - REG(DEV_MAC_DBG_CFG, 0x38), - REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), - REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), - REG(DEV_MAC_STICKY, 0x44), - REG(PCS1G_CFG, 0x48), - REG(PCS1G_MODE_CFG, 0x4c), - REG(PCS1G_SD_CFG, 0x50), - REG(PCS1G_ANEG_CFG, 0x54), - REG(PCS1G_ANEG_NP_CFG, 0x58), - REG(PCS1G_LB_CFG, 0x5c), - REG(PCS1G_DBG_CFG, 0x60), - REG(PCS1G_CDET_CFG, 0x64), - REG(PCS1G_ANEG_STATUS, 0x68), - REG(PCS1G_ANEG_NP_STATUS, 0x6c), - REG(PCS1G_LINK_STATUS, 0x70), - REG(PCS1G_LINK_DOWN_CNT, 0x74), - REG(PCS1G_STICKY, 0x78), - REG(PCS1G_DEBUG_STATUS, 0x7c), - REG(PCS1G_LPI_CFG, 0x80), - REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84), - REG(PCS1G_LPI_STATUS, 0x88), - REG(PCS1G_TSTPAT_MODE_CFG, 0x8c), - REG(PCS1G_TSTPAT_STATUS, 0x90), - REG(DEV_PCS_FX100_CFG, 0x94), - REG(DEV_PCS_FX100_STATUS, 0x98), -}; +#define VSC7514_VCAP_POLICER_BASE 128 +#define VSC7514_VCAP_POLICER_MAX 191 static const u32 *ocelot_regmap[TARGET_MAX] = { - [ANA] = ocelot_ana_regmap, - [QS] = ocelot_qs_regmap, - [QSYS] = ocelot_qsys_regmap, - [REW] = ocelot_rew_regmap, - [SYS] = ocelot_sys_regmap, - [S0] = ocelot_vcap_regmap, - [S1] = ocelot_vcap_regmap, - [S2] = ocelot_vcap_regmap, - [PTP] = ocelot_ptp_regmap, - [DEV_GMII] = ocelot_dev_gmii_regmap, + [ANA] = vsc7514_ana_regmap, + [QS] = vsc7514_qs_regmap, + [QSYS] = vsc7514_qsys_regmap, + [REW] = vsc7514_rew_regmap, + [SYS] = vsc7514_sys_regmap, + [S0] = vsc7514_vcap_regmap, + [S1] = vsc7514_vcap_regmap, + [S2] = vsc7514_vcap_regmap, + [PTP] = vsc7514_ptp_regmap, + [DEV_GMII] = vsc7514_dev_gmii_regmap, }; static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { @@ -633,211 +344,6 @@ static const struct ocelot_ops ocelot_ops = { .netdev_to_port = ocelot_netdev_to_port, }; -static const struct vcap_field vsc7514_vcap_es0_keys[] = { - [VCAP_ES0_EGR_PORT] = { 0, 4}, - [VCAP_ES0_IGR_PORT] = { 4, 4}, - [VCAP_ES0_RSV] = { 8, 2}, - [VCAP_ES0_L2_MC] = { 10, 1}, - [VCAP_ES0_L2_BC] = { 11, 1}, - [VCAP_ES0_VID] = { 12, 12}, - [VCAP_ES0_DP] = { 24, 1}, - [VCAP_ES0_PCP] = { 25, 3}, -}; - -static const struct vcap_field vsc7514_vcap_es0_actions[] = { - [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2}, - [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1}, - [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2}, - 
[VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1}, - [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2}, - [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2}, - [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2}, - [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1}, - [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2}, - [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2}, - [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12}, - [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3}, - [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1}, - [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12}, - [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3}, - [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1}, - [VCAP_ES0_ACT_RSV] = { 49, 24}, - [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1}, -}; - -static const struct vcap_field vsc7514_vcap_is1_keys[] = { - [VCAP_IS1_HK_TYPE] = { 0, 1}, - [VCAP_IS1_HK_LOOKUP] = { 1, 2}, - [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12}, - [VCAP_IS1_HK_RSV] = { 15, 9}, - [VCAP_IS1_HK_OAM_Y1731] = { 24, 1}, - [VCAP_IS1_HK_L2_MC] = { 25, 1}, - [VCAP_IS1_HK_L2_BC] = { 26, 1}, - [VCAP_IS1_HK_IP_MC] = { 27, 1}, - [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1}, - [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1}, - [VCAP_IS1_HK_TPID] = { 30, 1}, - [VCAP_IS1_HK_VID] = { 31, 12}, - [VCAP_IS1_HK_DEI] = { 43, 1}, - [VCAP_IS1_HK_PCP] = { 44, 3}, - /* Specific Fields for IS1 Half Key S1_NORMAL */ - [VCAP_IS1_HK_L2_SMAC] = { 47, 48}, - [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1}, - [VCAP_IS1_HK_ETYPE] = { 96, 16}, - [VCAP_IS1_HK_IP_SNAP] = {112, 1}, - [VCAP_IS1_HK_IP4] = {113, 1}, - /* Layer-3 Information */ - [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1}, - [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1}, - [VCAP_IS1_HK_L3_OPTIONS] = {116, 1}, - [VCAP_IS1_HK_L3_DSCP] = {117, 6}, - [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32}, - /* Layer-4 Information */ - [VCAP_IS1_HK_TCP_UDP] = {155, 1}, - [VCAP_IS1_HK_TCP] = {156, 1}, - [VCAP_IS1_HK_L4_SPORT] = {157, 16}, - [VCAP_IS1_HK_L4_RNG] = {173, 8}, - /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */ - [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1}, - [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12}, - [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1}, - [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3}, - [VCAP_IS1_HK_IP4_IP4] = { 64, 1}, - [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1}, - [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1}, - [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1}, - [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6}, - [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32}, - [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32}, - [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8}, - [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1}, - [VCAP_IS1_HK_IP4_TCP] = {147, 1}, - [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8}, - [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32}, -}; - -static const struct vcap_field vsc7514_vcap_is1_actions[] = { - [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1}, - [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6}, - [VCAP_IS1_ACT_QOS_ENA] = { 7, 1}, - [VCAP_IS1_ACT_QOS_VAL] = { 8, 3}, - [VCAP_IS1_ACT_DP_ENA] = { 11, 1}, - [VCAP_IS1_ACT_DP_VAL] = { 12, 1}, - [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8}, - [VCAP_IS1_ACT_PAG_VAL] = { 21, 8}, - [VCAP_IS1_ACT_RSV] = { 29, 9}, - /* The fields below are incorrectly shifted by 2 in the manual */ - [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1}, - [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12}, - [VCAP_IS1_ACT_FID_SEL] = { 51, 2}, - [VCAP_IS1_ACT_FID_VAL] = { 53, 13}, - [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1}, - [VCAP_IS1_ACT_PCP_VAL] = { 67, 3}, - [VCAP_IS1_ACT_DEI_VAL] = { 70, 1}, - [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1}, - [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2}, - [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4}, - [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1}, -}; - -static const struct vcap_field vsc7514_vcap_is2_keys[] = { - /* Common: 46 bits */ - 
[VCAP_IS2_TYPE] = { 0, 4}, - [VCAP_IS2_HK_FIRST] = { 4, 1}, - [VCAP_IS2_HK_PAG] = { 5, 8}, - [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12}, - [VCAP_IS2_HK_RSV2] = { 25, 1}, - [VCAP_IS2_HK_HOST_MATCH] = { 26, 1}, - [VCAP_IS2_HK_L2_MC] = { 27, 1}, - [VCAP_IS2_HK_L2_BC] = { 28, 1}, - [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1}, - [VCAP_IS2_HK_VID] = { 30, 12}, - [VCAP_IS2_HK_DEI] = { 42, 1}, - [VCAP_IS2_HK_PCP] = { 43, 3}, - /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ - [VCAP_IS2_HK_L2_DMAC] = { 46, 48}, - [VCAP_IS2_HK_L2_SMAC] = { 94, 48}, - /* MAC_ETYPE (TYPE=000) */ - [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16}, - [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16}, - [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8}, - [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3}, - /* MAC_LLC (TYPE=001) */ - [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40}, - /* MAC_SNAP (TYPE=010) */ - [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40}, - /* MAC_ARP (TYPE=011) */ - [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48}, - [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1}, - [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1}, - [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1}, - [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1}, - [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1}, - [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1}, - [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2}, - [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32}, - [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32}, - [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1}, - /* IP4_TCP_UDP / IP4_OTHER common */ - [VCAP_IS2_HK_IP4] = { 46, 1}, - [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1}, - [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1}, - [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1}, - [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1}, - [VCAP_IS2_HK_L3_TOS] = { 51, 8}, - [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32}, - [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32}, - [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1}, - /* IP4_TCP_UDP (TYPE=100) */ - [VCAP_IS2_HK_TCP] = {124, 1}, - [VCAP_IS2_HK_L4_DPORT] = {125, 16}, - [VCAP_IS2_HK_L4_SPORT] = {141, 16}, - [VCAP_IS2_HK_L4_RNG] = {157, 8}, - [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1}, - [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1}, - [VCAP_IS2_HK_L4_FIN] = {167, 1}, - [VCAP_IS2_HK_L4_SYN] = {168, 1}, - [VCAP_IS2_HK_L4_RST] = {169, 1}, - [VCAP_IS2_HK_L4_PSH] = {170, 1}, - [VCAP_IS2_HK_L4_ACK] = {171, 1}, - [VCAP_IS2_HK_L4_URG] = {172, 1}, - [VCAP_IS2_HK_L4_1588_DOM] = {173, 8}, - [VCAP_IS2_HK_L4_1588_VER] = {181, 4}, - /* IP4_OTHER (TYPE=101) */ - [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8}, - [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56}, - /* IP6_STD (TYPE=110) */ - [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1}, - [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128}, - [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8}, - /* OAM (TYPE=111) */ - [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7}, - [VCAP_IS2_HK_OAM_VER] = {149, 5}, - [VCAP_IS2_HK_OAM_OPCODE] = {154, 8}, - [VCAP_IS2_HK_OAM_FLAGS] = {162, 8}, - [VCAP_IS2_HK_OAM_MEPID] = {170, 16}, - [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1}, - [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1}, -}; - -static const struct vcap_field vsc7514_vcap_is2_actions[] = { - [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1}, - [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1}, - [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3}, - [VCAP_IS2_ACT_MASK_MODE] = { 5, 2}, - [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1}, - [VCAP_IS2_ACT_LRN_DIS] = { 8, 1}, - [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1}, - [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9}, - [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1}, - [VCAP_IS2_ACT_PORT_MASK] = { 20, 11}, - [VCAP_IS2_ACT_REW_OP] = { 31, 9}, - [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1}, - [VCAP_IS2_ACT_RSV] = { 41, 2}, - 
[VCAP_IS2_ACT_ACL_ID] = { 43, 6}, - [VCAP_IS2_ACT_HIT_CNT] = { 49, 32}, -}; - static struct vcap_props vsc7514_vcap_props[] = { [VCAP_ES0] = { .action_type_width = 0, @@ -1045,6 +551,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) { S1, "s1" }, { S2, "s2" }, { PTP, "ptp", 1 }, + { FDMA, "fdma", 1 }, }; if (!np && !pdev->dev.platform_data) @@ -1080,6 +587,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->targets[io_target[i].id] = target; } + if (ocelot->targets[FDMA]) + ocelot_fdma_init(pdev, ocelot); + hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); if (IS_ERR(hsio)) { dev_err(&pdev->dev, "missing hsio syscon\n"); @@ -1129,6 +639,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->num_flooding_pgids = 1; ocelot->vcap = vsc7514_vcap_props; + + ocelot->vcap_pol.base = VSC7514_VCAP_POLICER_BASE; + ocelot->vcap_pol.max = VSC7514_VCAP_POLICER_MAX; + ocelot->npi = -1; err = ocelot_init(ocelot); @@ -1139,6 +653,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) goto out_ocelot_devlink_unregister; + if (ocelot->fdma) + ocelot_fdma_start(ocelot); + err = ocelot_devlink_sb_register(ocelot); if (err) goto out_ocelot_release_ports; @@ -1179,6 +696,8 @@ static int mscc_ocelot_remove(struct platform_device *pdev) { struct ocelot *ocelot = platform_get_drvdata(pdev); + if (ocelot->fdma) + ocelot_fdma_deinit(ocelot); devlink_unregister(ocelot->devlink); ocelot_deinit_timestamp(ocelot); ocelot_devlink_sb_unregister(ocelot); diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c new file mode 100644 index 000000000000..c2af4eb8ca5d --- /dev/null +++ b/drivers/net/ethernet/mscc/vsc7514_regs.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + * Copyright (c) 2021 Innovative Advantage + */ +#include <soc/mscc/ocelot_vcap.h> +#include <soc/mscc/vsc7514_regs.h> +#include "ocelot.h" + +const u32 vsc7514_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x009000), + REG(ANA_VLANMASK, 0x009004), + REG(ANA_PORT_B_DOMAIN, 0x009008), + REG(ANA_ANAGEFIL, 0x00900c), + REG(ANA_ANEVENTS, 0x009010), + REG(ANA_STORMLIMIT_BURST, 0x009014), + REG(ANA_STORMLIMIT_CFG, 0x009018), + REG(ANA_ISOLATED_PORTS, 0x009028), + REG(ANA_COMMUNITY_PORTS, 0x00902c), + REG(ANA_AUTOAGE, 0x009030), + REG(ANA_MACTOPTIONS, 0x009034), + REG(ANA_LEARNDISC, 0x009038), + REG(ANA_AGENCTRL, 0x00903c), + REG(ANA_MIRRORPORTS, 0x009040), + REG(ANA_EMIRRORPORTS, 0x009044), + REG(ANA_FLOODING, 0x009048), + REG(ANA_FLOODING_IPMC, 0x00904c), + REG(ANA_SFLOW_CFG, 0x009050), + REG(ANA_PORT_MODE, 0x009080), + REG(ANA_PGID_PGID, 0x008c00), + REG(ANA_TABLES_ANMOVED, 0x008b30), + REG(ANA_TABLES_MACHDATA, 0x008b34), + REG(ANA_TABLES_MACLDATA, 0x008b38), + REG(ANA_TABLES_MACACCESS, 0x008b3c), + REG(ANA_TABLES_MACTINDX, 0x008b40), + REG(ANA_TABLES_VLANACCESS, 0x008b44), + REG(ANA_TABLES_VLANTIDX, 0x008b48), + REG(ANA_TABLES_ISDXACCESS, 0x008b4c), + REG(ANA_TABLES_ISDXTIDX, 0x008b50), + REG(ANA_TABLES_ENTRYLIM, 0x008b00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), + REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), + REG(ANA_MSTI_STATE, 0x008e00), + REG(ANA_PORT_VLAN_CFG, 0x007000), + REG(ANA_PORT_DROP_CFG, 0x007004), + REG(ANA_PORT_QOS_CFG, 0x007008), + REG(ANA_PORT_VCAP_CFG, 0x00700c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), + REG(ANA_PORT_PCP_DEI_MAP, 0x007020), + REG(ANA_PORT_CPU_FWD_CFG, 0x007060), + 
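/* [Editor's note] With this change the VSC7514 no longer hands out policer
 * indices from a driver-local counter (the removed block->pol_lpr); callers
 * instead pass an index inside the window [vcap_pol.base, vcap_pol.max],
 * here 128..191, and ocelot_vcap_policer_add() refcounts it. A hedged sketch
 * of the bounds check a caller might perform; vcap_pol_ix_valid() is a
 * hypothetical helper, not part of the driver:
 */
static bool vcap_pol_ix_valid(const struct ocelot *ocelot, u32 pol_ix)
{
	return pol_ix >= ocelot->vcap_pol.base &&
	       pol_ix <= ocelot->vcap_pol.max;
}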
REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), + REG(ANA_PORT_PORT_CFG, 0x007070), + REG(ANA_PORT_POL_CFG, 0x007074), + REG(ANA_PORT_PTP_CFG, 0x007078), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), + REG(ANA_OAM_UPM_LM_CNT, 0x007c00), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG(ANA_PFC_PFC_TIMER, 0x008804), + REG(ANA_IPT_OAM_MEP_CFG, 0x008000), + REG(ANA_IPT_IPT, 0x008004), + REG(ANA_PPT_PPT, 0x008ac0), + REG(ANA_FID_MAP_FID_MAP, 0x000000), + REG(ANA_AGGR_CFG, 0x0090b4), + REG(ANA_CPUQ_CFG, 0x0090b8), + REG(ANA_CPUQ_CFG2, 0x0090bc), + REG(ANA_CPUQ_8021_CFG, 0x0090c0), + REG(ANA_DSCP_CFG, 0x009100), + REG(ANA_DSCP_REWR_CFG, 0x009200), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), + REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), + REG(ANA_VRAP_CFG, 0x009280), + REG(ANA_VRAP_HDR_DATA, 0x009284), + REG(ANA_VRAP_HDR_MASK, 0x009288), + REG(ANA_DISCARD_CFG, 0x00928c), + REG(ANA_FID_CFG, 0x009290), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG(ANA_POL_STATE, 0x004014), + REG(ANA_POL_FLOWC, 0x008b80), + REG(ANA_POL_HYST, 0x008bec), + REG(ANA_POL_MISC_CFG, 0x008bf0), +}; +EXPORT_SYMBOL(vsc7514_ana_regmap); + +const u32 vsc7514_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG(QS_INH_DBG, 0x000048), +}; +EXPORT_SYMBOL(vsc7514_qs_regmap); + +const u32 vsc7514_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x011200), + REG(QSYS_SWITCH_PORT_MODE, 0x011234), + REG(QSYS_STAT_CNT_CFG, 0x011264), + REG(QSYS_EEE_CFG, 0x011268), + REG(QSYS_EEE_THRES, 0x011294), + REG(QSYS_IGR_NO_SHARING, 0x011298), + REG(QSYS_EGR_NO_SHARING, 0x01129c), + REG(QSYS_SW_STATUS, 0x0112a0), + REG(QSYS_EXT_CPU_CFG, 0x0112d0), + REG(QSYS_PAD_CFG, 0x0112d4), + REG(QSYS_CPU_GROUP_MAP, 0x0112d8), + REG(QSYS_QMAP, 0x0112dc), + REG(QSYS_ISDX_SGRP, 0x011400), + REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), + REG(QSYS_TFRM_MISC, 0x011310), + REG(QSYS_TFRM_PORT_DLY, 0x011314), + REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), + REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), + REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), + REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), + REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), + REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), + REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), + REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), + REG(QSYS_RED_PROFILE, 0x011338), + REG(QSYS_RES_QOS_MODE, 0x011378), + REG(QSYS_RES_CFG, 0x012000), + REG(QSYS_RES_STAT, 0x012004), + REG(QSYS_EGR_DROP_MODE, 0x01137c), + REG(QSYS_EQ_CTRL, 0x011380), + REG(QSYS_EVENTS_CORE, 0x011384), + REG(QSYS_CIR_CFG, 0x000000), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG(QSYS_SE_CONNECT, 0x00003c), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG(QSYS_SE_STATE, 0x00004c), + REG(QSYS_HSCH_MISC_CFG, 0x011388), +}; +EXPORT_SYMBOL(vsc7514_qsys_regmap); + +const u32 vsc7514_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 
0x000050), + REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), + REG(REW_DSCP_REMAP_CFG, 0x000790), + REG(REW_STAT_CFG, 0x000890), + REG(REW_PPT, 0x000680), +}; +EXPORT_SYMBOL(vsc7514_rew_regmap); + +const u32 vsc7514_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_1023, 0x000030), + REG(SYS_COUNT_RX_1024_1526, 0x000034), + REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_PAUSE, 0x00003c), + REG(SYS_COUNT_RX_CONTROL, 0x000040), + REG(SYS_COUNT_RX_LONGS, 0x000044), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), + REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_511, 0x000124), + REG(SYS_COUNT_TX_512_1023, 0x000128), + REG(SYS_COUNT_TX_1024_1526, 0x00012c), + REG(SYS_COUNT_TX_1527_MAX, 0x000130), + REG(SYS_COUNT_TX_AGING, 0x000170), + REG(SYS_RESET_CFG, 0x000508), + REG(SYS_CMID, 0x00050c), + REG(SYS_VLAN_ETYPE_CFG, 0x000510), + REG(SYS_PORT_MODE, 0x000514), + REG(SYS_FRONT_PORT_MODE, 0x000548), + REG(SYS_FRM_AGING, 0x000574), + REG(SYS_STAT_CFG, 0x000578), + REG(SYS_SW_STATUS, 0x00057c), + REG(SYS_MISC_CFG, 0x0005ac), + REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), + REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), + REG(SYS_CM_ADDR, 0x000500), + REG(SYS_CM_DATA, 0x000504), + REG(SYS_PAUSE_CFG, 0x000608), + REG(SYS_PAUSE_TOT_CFG, 0x000638), + REG(SYS_ATOP, 0x00063c), + REG(SYS_ATOP_TOT_CFG, 0x00066c), + REG(SYS_MAC_FC_CFG, 0x000670), + REG(SYS_MMGT, 0x00069c), + REG(SYS_MMGT_FAST, 0x0006a0), + REG(SYS_EVENTS_DIF, 0x0006a4), + REG(SYS_EVENTS_CORE, 0x0006b4), + REG(SYS_CNT, 0x000000), + REG(SYS_PTP_STATUS, 0x0006b8), + REG(SYS_PTP_TXSTAMP, 0x0006bc), + REG(SYS_PTP_NXT, 0x0006c0), + REG(SYS_PTP_CFG, 0x0006c4), +}; +EXPORT_SYMBOL(vsc7514_sys_regmap); + +const u32 vsc7514_vcap_regmap[] = { + /* VCAP_CORE_CFG */ + REG(VCAP_CORE_UPDATE_CTRL, 0x000000), + REG(VCAP_CORE_MV_CFG, 0x000004), + /* VCAP_CORE_CACHE */ + REG(VCAP_CACHE_ENTRY_DAT, 0x000008), + REG(VCAP_CACHE_MASK_DAT, 0x000108), + REG(VCAP_CACHE_ACTION_DAT, 0x000208), + REG(VCAP_CACHE_CNT_DAT, 0x000308), + REG(VCAP_CACHE_TG_DAT, 0x000388), + /* VCAP_CONST */ + REG(VCAP_CONST_VCAP_VER, 0x000398), + REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c), + REG(VCAP_CONST_ENTRY_CNT, 0x0003a0), + REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4), + REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8), + REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac), + REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0), + REG(VCAP_CONST_CNT_WIDTH, 0x0003b4), + REG(VCAP_CONST_CORE_CNT, 0x0003b8), + REG(VCAP_CONST_IF_CNT, 0x0003bc), +}; +EXPORT_SYMBOL(vsc7514_vcap_regmap); + +const u32 vsc7514_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), + REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), + 
REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), +}; +EXPORT_SYMBOL(vsc7514_ptp_regmap); + +const u32 vsc7514_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG(PCS1G_CFG, 0x48), + REG(PCS1G_MODE_CFG, 0x4c), + REG(PCS1G_SD_CFG, 0x50), + REG(PCS1G_ANEG_CFG, 0x54), + REG(PCS1G_ANEG_NP_CFG, 0x58), + REG(PCS1G_LB_CFG, 0x5c), + REG(PCS1G_DBG_CFG, 0x60), + REG(PCS1G_CDET_CFG, 0x64), + REG(PCS1G_ANEG_STATUS, 0x68), + REG(PCS1G_ANEG_NP_STATUS, 0x6c), + REG(PCS1G_LINK_STATUS, 0x70), + REG(PCS1G_LINK_DOWN_CNT, 0x74), + REG(PCS1G_STICKY, 0x78), + REG(PCS1G_DEBUG_STATUS, 0x7c), + REG(PCS1G_LPI_CFG, 0x80), + REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84), + REG(PCS1G_LPI_STATUS, 0x88), + REG(PCS1G_TSTPAT_MODE_CFG, 0x8c), + REG(PCS1G_TSTPAT_STATUS, 0x90), + REG(DEV_PCS_FX100_CFG, 0x94), + REG(DEV_PCS_FX100_STATUS, 0x98), +}; +EXPORT_SYMBOL(vsc7514_dev_gmii_regmap); + +const struct vcap_field vsc7514_vcap_es0_keys[] = { + [VCAP_ES0_EGR_PORT] = { 0, 4 }, + [VCAP_ES0_IGR_PORT] = { 4, 4 }, + [VCAP_ES0_RSV] = { 8, 2 }, + [VCAP_ES0_L2_MC] = { 10, 1 }, + [VCAP_ES0_L2_BC] = { 11, 1 }, + [VCAP_ES0_VID] = { 12, 12 }, + [VCAP_ES0_DP] = { 24, 1 }, + [VCAP_ES0_PCP] = { 25, 3 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_es0_keys); + +const struct vcap_field vsc7514_vcap_es0_actions[] = { + [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 }, + [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 }, + [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 }, + [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1 }, + [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2 }, + [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2 }, + [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2 }, + [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1 }, + [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2 }, + [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2 }, + [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12 }, + [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3 }, + [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1 }, + [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12 }, + [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3 }, + [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1 }, + [VCAP_ES0_ACT_RSV] = { 49, 24 }, + [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_es0_actions); + +const struct vcap_field vsc7514_vcap_is1_keys[] = { + [VCAP_IS1_HK_TYPE] = { 0, 1 }, + [VCAP_IS1_HK_LOOKUP] = { 1, 2 }, + [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 }, + [VCAP_IS1_HK_RSV] = { 15, 9 }, + [VCAP_IS1_HK_OAM_Y1731] = { 24, 1 }, + [VCAP_IS1_HK_L2_MC] = { 25, 1 }, + [VCAP_IS1_HK_L2_BC] = { 26, 1 }, + [VCAP_IS1_HK_IP_MC] = { 27, 1 }, + [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1 }, + [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1 }, + [VCAP_IS1_HK_TPID] = { 30, 1 }, + [VCAP_IS1_HK_VID] = { 31, 12 }, + [VCAP_IS1_HK_DEI] = { 43, 1 }, + [VCAP_IS1_HK_PCP] = { 44, 3 }, + /* Specific Fields for IS1 Half Key S1_NORMAL */ + [VCAP_IS1_HK_L2_SMAC] = { 47, 48 }, + [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1 }, + [VCAP_IS1_HK_ETYPE] = { 96, 16 }, + [VCAP_IS1_HK_IP_SNAP] = { 112, 1 }, + [VCAP_IS1_HK_IP4] = { 113, 1 }, + /* Layer-3 Information */ + [VCAP_IS1_HK_L3_FRAGMENT] = { 114, 1 }, + [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = { 
115, 1 }, + [VCAP_IS1_HK_L3_OPTIONS] = { 116, 1 }, + [VCAP_IS1_HK_L3_DSCP] = { 117, 6 }, + [VCAP_IS1_HK_L3_IP4_SIP] = { 123, 32 }, + /* Layer-4 Information */ + [VCAP_IS1_HK_TCP_UDP] = { 155, 1 }, + [VCAP_IS1_HK_TCP] = { 156, 1 }, + [VCAP_IS1_HK_L4_SPORT] = { 157, 16 }, + [VCAP_IS1_HK_L4_RNG] = { 173, 8 }, + /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */ + [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1 }, + [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12 }, + [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1 }, + [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3 }, + [VCAP_IS1_HK_IP4_IP4] = { 64, 1 }, + [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1 }, + [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1 }, + [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1 }, + [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6 }, + [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32 }, + [VCAP_IS1_HK_IP4_L3_IP4_SIP] = { 106, 32 }, + [VCAP_IS1_HK_IP4_L3_PROTO] = { 138, 8 }, + [VCAP_IS1_HK_IP4_TCP_UDP] = { 146, 1 }, + [VCAP_IS1_HK_IP4_TCP] = { 147, 1 }, + [VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 }, + [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is1_keys); + +const struct vcap_field vsc7514_vcap_is1_actions[] = { + [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 }, + [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 }, + [VCAP_IS1_ACT_QOS_ENA] = { 7, 1 }, + [VCAP_IS1_ACT_QOS_VAL] = { 8, 3 }, + [VCAP_IS1_ACT_DP_ENA] = { 11, 1 }, + [VCAP_IS1_ACT_DP_VAL] = { 12, 1 }, + [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8 }, + [VCAP_IS1_ACT_PAG_VAL] = { 21, 8 }, + [VCAP_IS1_ACT_RSV] = { 29, 9 }, + /* The fields below are incorrectly shifted by 2 in the manual */ + [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1 }, + [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12 }, + [VCAP_IS1_ACT_FID_SEL] = { 51, 2 }, + [VCAP_IS1_ACT_FID_VAL] = { 53, 13 }, + [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1 }, + [VCAP_IS1_ACT_PCP_VAL] = { 67, 3 }, + [VCAP_IS1_ACT_DEI_VAL] = { 70, 1 }, + [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1 }, + [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2 }, + [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 }, + [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is1_actions); + +const struct vcap_field vsc7514_vcap_is2_keys[] = { + /* Common: 46 bits */ + [VCAP_IS2_TYPE] = { 0, 4 }, + [VCAP_IS2_HK_FIRST] = { 4, 1 }, + [VCAP_IS2_HK_PAG] = { 5, 8 }, + [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12 }, + [VCAP_IS2_HK_RSV2] = { 25, 1 }, + [VCAP_IS2_HK_HOST_MATCH] = { 26, 1 }, + [VCAP_IS2_HK_L2_MC] = { 27, 1 }, + [VCAP_IS2_HK_L2_BC] = { 28, 1 }, + [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1 }, + [VCAP_IS2_HK_VID] = { 30, 12 }, + [VCAP_IS2_HK_DEI] = { 42, 1 }, + [VCAP_IS2_HK_PCP] = { 43, 3 }, + /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ + [VCAP_IS2_HK_L2_DMAC] = { 46, 48 }, + [VCAP_IS2_HK_L2_SMAC] = { 94, 48 }, + /* MAC_ETYPE (TYPE=000) */ + [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = { 142, 16 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = { 158, 16 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = { 174, 8 }, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = { 182, 3 }, + /* MAC_LLC (TYPE=001) */ + [VCAP_IS2_HK_MAC_LLC_L2_LLC] = { 142, 40 }, + /* MAC_SNAP (TYPE=010) */ + [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = { 142, 40 }, + /* MAC_ARP (TYPE=011) */ + [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48 }, + [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1 }, + [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1 }, + [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1 }, + [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1 }, + [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1 }, + [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1 }, + [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 100, 2 }, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 
102, 32 }, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = { 134, 32 }, + [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = { 166, 1 }, + /* IP4_TCP_UDP / IP4_OTHER common */ + [VCAP_IS2_HK_IP4] = { 46, 1 }, + [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1 }, + [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1 }, + [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1 }, + [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1 }, + [VCAP_IS2_HK_L3_TOS] = { 51, 8 }, + [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32 }, + [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32 }, + [VCAP_IS2_HK_DIP_EQ_SIP] = { 123, 1 }, + /* IP4_TCP_UDP (TYPE=100) */ + [VCAP_IS2_HK_TCP] = { 124, 1 }, + [VCAP_IS2_HK_L4_DPORT] = { 125, 16 }, + [VCAP_IS2_HK_L4_SPORT] = { 141, 16 }, + [VCAP_IS2_HK_L4_RNG] = { 157, 8 }, + [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = { 165, 1 }, + [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = { 166, 1 }, + [VCAP_IS2_HK_L4_FIN] = { 167, 1 }, + [VCAP_IS2_HK_L4_SYN] = { 168, 1 }, + [VCAP_IS2_HK_L4_RST] = { 169, 1 }, + [VCAP_IS2_HK_L4_PSH] = { 170, 1 }, + [VCAP_IS2_HK_L4_ACK] = { 171, 1 }, + [VCAP_IS2_HK_L4_URG] = { 172, 1 }, + [VCAP_IS2_HK_L4_1588_DOM] = { 173, 8 }, + [VCAP_IS2_HK_L4_1588_VER] = { 181, 4 }, + /* IP4_OTHER (TYPE=101) */ + [VCAP_IS2_HK_IP4_L3_PROTO] = { 124, 8 }, + [VCAP_IS2_HK_L3_PAYLOAD] = { 132, 56 }, + /* IP6_STD (TYPE=110) */ + [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1 }, + [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128 }, + [VCAP_IS2_HK_IP6_L3_PROTO] = { 175, 8 }, + /* OAM (TYPE=111) */ + [VCAP_IS2_HK_OAM_MEL_FLAGS] = { 142, 7 }, + [VCAP_IS2_HK_OAM_VER] = { 149, 5 }, + [VCAP_IS2_HK_OAM_OPCODE] = { 154, 8 }, + [VCAP_IS2_HK_OAM_FLAGS] = { 162, 8 }, + [VCAP_IS2_HK_OAM_MEPID] = { 170, 16 }, + [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 }, + [VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is2_keys); + +const struct vcap_field vsc7514_vcap_is2_actions[] = { + [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 }, + [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 }, + [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 }, + [VCAP_IS2_ACT_MASK_MODE] = { 5, 2 }, + [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1 }, + [VCAP_IS2_ACT_LRN_DIS] = { 8, 1 }, + [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1 }, + [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9 }, + [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1 }, + [VCAP_IS2_ACT_PORT_MASK] = { 20, 11 }, + [VCAP_IS2_ACT_REW_OP] = { 31, 9 }, + [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1 }, + [VCAP_IS2_ACT_RSV] = { 41, 2 }, + [VCAP_IS2_ACT_ACL_ID] = { 43, 6 }, + [VCAP_IS2_ACT_HIT_CNT] = { 49, 32 }, +}; +EXPORT_SYMBOL(vsc7514_vcap_is2_actions); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 5736fcdafd7a..50ac3ee2577a 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1704,7 +1704,9 @@ myri10ge_set_pauseparam(struct net_device *netdev, static void myri10ge_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct myri10ge_priv *mgp = netdev_priv(netdev); @@ -3740,7 +3742,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct myri10ge_priv *mgp; struct device *dev = &pdev->dev; int status = -ENXIO; - int dac_enabled; unsigned hdr_offset, ss_offset; static int board_number; @@ -3780,15 +3781,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) myri10ge_mask_surprise_down(pdev); pci_set_master(pdev); - dac_enabled = 1; status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (status != 0) { - 
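/* [Editor's note] The myri10ge ethtool hunk above reflects a core API
 * change: newer kernels pass two extra arguments to the {get,set}_ringparam
 * callbacks, a kernel-only struct kernel_ethtool_ringparam and a netlink
 * extack. Drivers that need neither simply ignore them. A minimal sketch
 * under that assumption; the mydrv_* names and MYDRV_* limits are
 * illustrative:
 */
#define MYDRV_RX_RING_MAX 4096
#define MYDRV_TX_RING_MAX 4096

struct mydrv_priv {
	u32 rx_ring_size;
	u32 tx_ring_size;
};

static void mydrv_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	ring->rx_max_pending = MYDRV_RX_RING_MAX;
	ring->tx_max_pending = MYDRV_TX_RING_MAX;
	ring->rx_pending = priv->rx_ring_size;
	ring->tx_pending = priv->tx_ring_size;
}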
dac_enabled = 0; - dev_err(&pdev->dev, - "64-bit pci address mask was refused, trying 32-bit\n"); - status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - } - if (status != 0) { dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); goto abort_with_enabled; } @@ -3872,10 +3866,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - netdev->features = netdev->hw_features; - - if (dac_enabled) - netdev->features |= NETIF_F_HIGHDMA; + netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; netdev->vlan_features |= mgp->features; if (mgp->fw_ver_tiny < 37) diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index d74a80f010c5..3f371faeb6d0 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -114,6 +114,7 @@ static int sonic_probe1(struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); int err = -ENODEV; int i; + unsigned char addr[ETH_ALEN]; if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string)) return -EBUSY; @@ -143,9 +144,10 @@ static int sonic_probe1(struct net_device *dev) SONIC_WRITE(SONIC_CEP,0); for (i=0; i<3; i++) { val = SONIC_READ(SONIC_CAP0-i); - dev->dev_addr[i*2] = val; - dev->dev_addr[i*2+1] = val >> 8; + addr[i*2] = val; + addr[i*2+1] = val >> 8; } + eth_hw_addr_set(dev, addr); lp->dma_bitmode = SONIC_BITMODE32; diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 8709d700e15a..b16f7c830f9b 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -203,6 +203,7 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); const int prom_addr = ONBOARD_SONIC_PROM_BASE; unsigned short val; + u8 addr[ETH_ALEN]; /* * On NuBus boards we can sometimes look in the ROM resources. @@ -213,7 +214,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev) int i; for (i = 0; i < 6; i++) - dev->dev_addr[i] = SONIC_READ_PROM(i); + addr[i] = SONIC_READ_PROM(i); + eth_hw_addr_set(dev, addr); if (!INVALID_MAC(dev->dev_addr)) return; @@ -222,7 +224,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev) * source has a rather long and detailed historical account of * why this is so. 
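/* [Editor's note] The myri10ge DMA hunk just above is part of a tree-wide
 * cleanup: dma_set_mask_and_coherent() sets the streaming and coherent masks
 * in one call, and on current kernels a 64-bit mask request does not fail in
 * situations where a 32-bit one would have succeeded, so the old
 * try-64-then-fall-back-to-32 dance is dead code. The resulting probe-time
 * shape, sketched as a fragment from inside probe():
 */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
	dev_err(&pdev->dev, "no usable DMA configuration\n");
	goto abort_with_enabled;	/* unwind label as in the driver */
}
/* With 64-bit DMA guaranteed, NETIF_F_HIGHDMA can be set unconditionally. */
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;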
*/ - bit_reverse_addr(dev->dev_addr); + bit_reverse_addr(addr); + eth_hw_addr_set(dev, addr); if (!INVALID_MAC(dev->dev_addr)) return; @@ -243,14 +246,15 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev) SONIC_WRITE(SONIC_CEP, 15); val = SONIC_READ(SONIC_CAP2); - dev->dev_addr[5] = val >> 8; - dev->dev_addr[4] = val & 0xff; + addr[5] = val >> 8; + addr[4] = val & 0xff; val = SONIC_READ(SONIC_CAP1); - dev->dev_addr[3] = val >> 8; - dev->dev_addr[2] = val & 0xff; + addr[3] = val >> 8; + addr[2] = val & 0xff; val = SONIC_READ(SONIC_CAP0); - dev->dev_addr[1] = val >> 8; - dev->dev_addr[0] = val & 0xff; + addr[1] = val >> 8; + addr[0] = val & 0xff; + eth_hw_addr_set(dev, addr); if (!INVALID_MAC(dev->dev_addr)) return; @@ -355,13 +359,16 @@ static int mac_onboard_sonic_probe(struct net_device *dev) static int mac_sonic_nubus_ethernet_addr(struct net_device *dev, unsigned long prom_addr, int id) { + u8 addr[ETH_ALEN]; int i; + for(i = 0; i < 6; i++) - dev->dev_addr[i] = SONIC_READ_PROM(i); + addr[i] = SONIC_READ_PROM(i); /* Some of the addresses are bit-reversed */ if (id != MACSONIC_DAYNA) - bit_reverse_addr(dev->dev_addr); + bit_reverse_addr(addr); + eth_hw_addr_set(dev, addr); return 0; } diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index ca4686094701..52fef34d43f9 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -120,13 +120,14 @@ static const struct net_device_ops xtsonic_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; -static int __init sonic_probe1(struct net_device *dev) +static int sonic_probe1(struct net_device *dev) { unsigned int silicon_revision; struct sonic_local *lp = netdev_priv(dev); unsigned int base_addr = dev->base_addr; int i; int err = 0; + unsigned char addr[ETH_ALEN]; if (!request_mem_region(base_addr, 0x100, xtsonic_string)) return -EBUSY; @@ -163,9 +164,10 @@ static int __init sonic_probe1(struct net_device *dev) for (i=0; i<3; i++) { unsigned int val = SONIC_READ(SONIC_CAP0-i); - dev->dev_addr[i*2] = val; - dev->dev_addr[i*2+1] = val >> 8; + addr[i*2] = val; + addr[i*2+1] = val >> 8; } + eth_hw_addr_set(dev, addr); lp->dma_bitmode = SONIC_BITMODE32; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index d1c32c65db05..6dd451adc331 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -5461,8 +5461,11 @@ static int s2io_ethtool_set_led(struct net_device *dev, return 0; } -static void s2io_ethtool_gringparam(struct net_device *dev, - struct ethtool_ringparam *ering) +static void +s2io_ethtool_gringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct s2io_nic *sp = netdev_priv(dev); int i, tx_desc_count = 0, rx_desc_count = 0; @@ -7655,7 +7658,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) struct s2io_nic *sp; struct net_device *dev; int i, j, ret; - int dma_flag = false; u32 mac_up, mac_down; u64 val64 = 0, tmp64 = 0; struct XENA_dev_config __iomem *bar0 = NULL; @@ -7677,17 +7679,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) return ret; } - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__); - dma_flag = true; - if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - DBG_PRINT(ERR_DBG, - 
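/* [Editor's note] The sonic hunks above all follow one conversion recipe:
 * netdev->dev_addr became const in recent kernels, so drivers read the MAC
 * into a local buffer and commit it with eth_hw_addr_set() instead of
 * writing dev->dev_addr[] in place. A sketch of the recipe; read_prom_byte()
 * is a hypothetical stand-in for the device-specific PROM accessor:
 */
u8 addr[ETH_ALEN];
int i;

for (i = 0; i < ETH_ALEN; i++)
	addr[i] = read_prom_byte(i);	/* device-specific read */
eth_hw_addr_set(dev, addr);		/* single point of update */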
"Unable to obtain 64bit DMA for coherent allocations\n"); - pci_disable_device(pdev); - return -ENOMEM; - } - } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { - DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__); } else { pci_disable_device(pdev); return -ENOMEM; @@ -7717,7 +7710,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) sp = netdev_priv(dev); sp->dev = dev; sp->pdev = pdev; - sp->high_dma_flag = dma_flag; sp->device_enabled_once = false; if (rx_ring_mode == 1) sp->rxd_mode = RXD_MODE_1; @@ -7865,9 +7857,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO; dev->features |= dev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - if (sp->high_dma_flag == true) - dev->features |= NETIF_F_HIGHDMA; + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HIGHDMA; dev->watchdog_timeo = WATCH_DOG_TIMEOUT; INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); INIT_WORK(&sp->set_link_task, s2io_set_link); diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index a4266d1544ab..cb7080eb5912 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -873,7 +873,6 @@ struct s2io_nic { struct mac_addr def_mac_addr[256]; struct net_device_stats stats; - int high_dma_flag; int device_enabled_once; char name[60]; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 1969009a91e7..aa7c093f1f91 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3159,10 +3159,6 @@ static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data) if (copy_from_user(&config, data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - /* Transmit HW Timestamp not supported */ switch (config.tx_type) { case HWTSTAMP_TX_OFF: @@ -3353,7 +3349,7 @@ static const struct net_device_ops vxge_netdev_ops = { }; static int vxge_device_register(struct __vxge_hw_device *hldev, - struct vxge_config *config, int high_dma, + struct vxge_config *config, int no_of_vpath, struct vxgedev **vdev_out) { struct net_device *ndev; @@ -3425,11 +3421,7 @@ static int vxge_device_register(struct __vxge_hw_device *hldev, vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s : checksumming enabled", __func__); - if (high_dma) { - ndev->features |= NETIF_F_HIGHDMA; - vxge_debug_init(vxge_hw_device_trace_level_get(hldev), - "%s : using High DMA", __func__); - } + ndev->features |= NETIF_F_HIGHDMA; /* MTU range: 68 - 9600 */ ndev->min_mtu = VXGE_HW_MIN_MTU; @@ -4286,7 +4278,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) struct __vxge_hw_device *hldev; enum vxge_hw_status status; int ret; - int high_dma = 0; u64 vpath_mask = 0; struct vxgedev *vdev; struct vxge_config *ll_config = NULL; @@ -4376,22 +4367,9 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit0; } - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { vxge_debug_ll_config(VXGE_TRACE, "%s : using 64bit DMA", __func__); - - high_dma = 1; - - if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { - vxge_debug_init(VXGE_ERR, - "%s : unable to obtain 64bit DMA for " - "consistent allocations", __func__); - ret = -ENOMEM; - goto _exit1; - } - } else if (!dma_set_mask(&pdev->dev, 
DMA_BIT_MASK(32))) { - vxge_debug_ll_config(VXGE_TRACE, - "%s : using 32bit DMA", __func__); } else { ret = -ENOMEM; goto _exit1; @@ -4559,8 +4537,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; - ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, - &vdev); + ret = vxge_device_register(hldev, ll_config, no_of_vpath, &vdev); if (ret) { ret = -EINVAL; goto _exit4; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 2af9faee96c5..f448c5682594 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -43,15 +43,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) struct circ_buf *ring; ring = &priv->stats_ids.free_list; - /* Check if buffer is full. */ - if (!CIRC_SPACE(ring->head, ring->tail, - priv->stats_ring_size * NFP_FL_STATS_ELEM_RS - - NFP_FL_STATS_ELEM_RS + 1)) + /* Check if buffer is full, stats_ring_size must be power of 2 */ + if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size)) return -ENOBUFS; - memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS); - ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) % - (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); + /* Each increment of head represents size of NFP_FL_STATS_ELEM_RS */ + memcpy(&ring->buf[ring->head * NFP_FL_STATS_ELEM_RS], + &stats_context_id, NFP_FL_STATS_ELEM_RS); + ring->head = (ring->head + 1) & (priv->stats_ring_size - 1); return 0; } @@ -86,11 +85,14 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) return -ENOENT; } - memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS); + /* Each increment of tail represents size of NFP_FL_STATS_ELEM_RS */ + memcpy(&temp_stats_id, &ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS], + NFP_FL_STATS_ELEM_RS); *stats_context_id = temp_stats_id; - memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS); - ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) % - (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); + memcpy(&ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS], &freed_stats_id, + NFP_FL_STATS_ELEM_RS); + /* stats_ring_size must be power of 2 */ + ring->tail = (ring->tail + 1) & (priv->stats_ring_size - 1); return 0; } @@ -138,13 +140,18 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) struct circ_buf *ring; ring = &priv->mask_ids.mask_id_free_list; - /* Checking if buffer is full. 
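/* [Editor's note] The nfp flower hunks above rework the free-list rings so
 * that head/tail count elements rather than bytes: the element size is
 * applied only when addressing the byte buffer, and because the ring size is
 * a power of two the wrap-around is a mask instead of a modulo. A minimal
 * sketch of the producer side of such a ring, assuming <linux/circ_buf.h>:
 */
#define ELEM_SZ 4	/* bytes per element, e.g. a u32 id */

static int ring_push(struct circ_buf *ring, u32 ring_size /* power of 2 */,
		     const void *elem)
{
	if (!CIRC_SPACE(ring->head, ring->tail, ring_size))
		return -ENOBUFS;	/* ring full */

	memcpy(&ring->buf[ring->head * ELEM_SZ], elem, ELEM_SZ);
	ring->head = (ring->head + 1) & (ring_size - 1);	/* cheap wrap */
	return 0;
}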
*/ + /* Checking if buffer is full, + * NFP_FLOWER_MASK_ENTRY_RS must be power of 2 + */ if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0) return -ENOBUFS; - memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS); - ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) % - (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS); + /* Each increment of head represents size of + * NFP_FLOWER_MASK_ELEMENT_RS + */ + memcpy(&ring->buf[ring->head * NFP_FLOWER_MASK_ELEMENT_RS], &mask_id, + NFP_FLOWER_MASK_ELEMENT_RS); + ring->head = (ring->head + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1); priv->mask_ids.last_used[mask_id] = ktime_get(); @@ -171,7 +178,11 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) if (ring->head == ring->tail) goto err_not_found; - memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS); + /* Each increment of tail represents size of + * NFP_FLOWER_MASK_ELEMENT_RS + */ + memcpy(&temp_id, &ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS], + NFP_FLOWER_MASK_ELEMENT_RS); *mask_id = temp_id; reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id], @@ -180,9 +191,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) if (ktime_before(ktime_get(), reuse_timeout)) goto err_not_found; - memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS); - ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) % - (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS); + memcpy(&ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS], &freed_id, + NFP_FLOWER_MASK_ELEMENT_RS); + /* NFP_FLOWER_MASK_ENTRY_RS must be power of 2 */ + ring->tail = (ring->tail + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1); return 0; @@ -338,11 +350,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie, nfp_flow->meta.mask_len, &nfp_flow->meta.flags, &new_mask_id)) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id"); - if (nfp_release_stats_entry(app, stats_cxt)) { - NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context"); - err = -EINVAL; - goto err_remove_rhash; - } err = -ENOENT; goto err_remove_rhash; } @@ -359,21 +366,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie, check_entry = nfp_flower_search_fl_table(app, cookie, netdev); if (check_entry) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry"); - if (nfp_release_stats_entry(app, stats_cxt)) { - NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context"); - err = -EINVAL; - goto err_remove_mask; - } - - if (!nfp_flow->pre_tun_rule.dev && - !nfp_check_mask_remove(app, nfp_flow->mask_data, - nfp_flow->meta.mask_len, - NULL, &new_mask_id)) { - NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id"); - err = -EINVAL; - goto err_remove_mask; - } - err = -EEXIST; goto err_remove_mask; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 224089d04d98..f97eff5afd12 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1867,6 +1867,9 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void * void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { + if (!netdev) + return -EOPNOTSUPP; + if (!nfp_fl_is_netdev_to_offload(netdev)) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index df203738511b..0b1865e9f0b5 100644 
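/* [Editor's note] The nfp_compile_flow_metadata() hunk above deletes inline
 * release calls at two failure sites; cleanup is expected to happen in the
 * function's error labels instead. That is the standard goto-unwind idiom,
 * sketched below with hypothetical acquire/release helpers:
 */
int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

static int setup_everything(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_release_a;

	err = acquire_c();
	if (err)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}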
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -565,7 +565,6 @@ struct nfp_net_dp { * @exn_name: Name for Exception interrupt * @shared_handler: Handler for shared interrupts * @shared_name: Name for shared interrupt - * @me_freq_mhz: ME clock_freq (MHz) * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active, * @reconfig_sync_present and HW reconfiguration request * regs/machinery from async requests (sync must take @@ -650,8 +649,6 @@ struct nfp_net { irq_handler_t shared_handler; char shared_name[IFNAMSIZ + 8]; - u32 me_freq_mhz; - bool link_up; spinlock_t link_status_lock; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 850bfdf83d0a..79257ec41987 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1944,7 +1944,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) xdp_prog, act); continue; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(dp->netdev, xdp_prog, act); @@ -4097,7 +4097,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = nn->max_mtu; - netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; + netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS); netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1de076f55740..e0c27471bcdb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -381,7 +381,9 @@ err_bad_set: } static void nfp_net_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nfp_net *nn = netdev_priv(netdev); @@ -406,7 +408,9 @@ static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) } static int nfp_net_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct nfp_net *nn = netdev_priv(netdev); u32 rxd_cnt, txd_cnt; @@ -1344,7 +1348,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, * ME timestamp ticks. There are 16 ME clock cycles for each timestamp * count. 
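/* [Editor's note] bpf_warn_invalid_xdp_action() in the hunk above gains the
 * netdev and program as arguments so the warning can identify the offender;
 * the fallthrough ordering of the verdict switch is unchanged. A hedged
 * sketch of that canonical verdict handling inside an rx poll loop:
 */
switch (act) {
case XDP_PASS:
case XDP_TX:
case XDP_REDIRECT:
	break;			/* handled earlier in the real loop */
default:
	bpf_warn_invalid_xdp_action(netdev, xdp_prog, act);
	fallthrough;
case XDP_ABORTED:
	trace_xdp_exception(netdev, xdp_prog, act);
	fallthrough;
case XDP_DROP:
	/* recycle the buffer */
	break;
}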
*/ - factor = nn->me_freq_mhz / 16; + factor = nn->tlv_caps.me_freq_mhz / 16; /* Each pair of (usecs, max_frames) fields specifies that interrupts * should be coalesced until diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 369f6ae700c7..181ac8e789a3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -286,8 +286,8 @@ nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower) if (repr->dst->u.port_info.lower_dev != lower) return; - netdev->gso_max_size = lower->gso_max_size; - netdev->gso_max_segs = lower->gso_max_segs; + netif_set_gso_max_size(netdev, lower->gso_max_size); + netif_set_gso_max_segs(netdev, lower->gso_max_segs); netdev_update_features(netdev); } @@ -381,7 +381,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, /* Advertise but disable TSO by default. */ netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); - netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; + netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS); netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; netdev->features |= NETIF_F_LLTX; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index d7ac0307797f..34c0d2ddf9ef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) return -ENOMEM; cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (!cache) + if (!cache) { + nfp_cpp_area_free(area); return -ENOMEM; + } cache->id = 0; cache->addr = 0; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index cfeb7620ae20..07a00dd9cfe0 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1209,7 +1209,7 @@ static void *nixge_get_nvmem_address(struct device *dev) cell = nvmem_cell_get(dev, "address"); if (IS_ERR(cell)) - return NULL; + return cell; mac = nvmem_cell_read(cell, &cell_size); nvmem_cell_put(cell); @@ -1282,7 +1282,7 @@ static int nixge_probe(struct platform_device *pdev) ndev->max_mtu = NIXGE_JUMBO_MTU; mac_addr = nixge_get_nvmem_address(&pdev->dev); - if (mac_addr && is_valid_ether_addr(mac_addr)) { + if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) { eth_hw_addr_set(ndev, mac_addr); kfree(mac_addr); } else { diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 9b530d7509a4..660013f716d4 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -4651,7 +4651,10 @@ static int nv_nway_reset(struct net_device *dev) return ret; } -static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +static void nv_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct fe_priv *np = netdev_priv(dev); @@ -4662,7 +4665,10 @@ static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* r ring->tx_pending = np->tx_ring_size; } -static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +static int nv_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct fe_priv *np = netdev_priv(dev); 
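/* Annotation: most of the ethtool churn in this series is mechanical: the
 * core grew kernel_ethtool_ringparam and netlink_ext_ack parameters on the
 * get/set_ringparam callbacks, and drivers that need neither simply ignore
 * them. Minimal sketch of the new callback shape; the foo_* names and
 * FOO_* limits are illustrative:
 */
static void foo_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ring,
                              struct kernel_ethtool_ringparam *kernel_ring,
                              struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(dev);

        ring->rx_max_pending = FOO_RX_RING_MAX;
        ring->tx_max_pending = FOO_TX_RING_MAX;
        ring->rx_pending = priv->rx_ring_size;
        ring->tx_pending = priv->tx_ring_size;
        /* kernel_ring and extack stay untouched in simple drivers */
}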
u8 __iomem *base = get_hwbase(dev); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 660b07cb5b92..84cc79e928c8 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -270,9 +270,13 @@ static int pch_gbe_nway_reset(struct net_device *netdev) * pch_gbe_get_ringparam - Report ring sizes * @netdev: Network interface device structure * @ring: Ring param structure + * @kernel_ring: Ring external param structure + * @extack: netlink handle */ static void pch_gbe_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_tx_ring *txdr = adapter->tx_ring; @@ -288,12 +292,16 @@ static void pch_gbe_get_ringparam(struct net_device *netdev, * pch_gbe_set_ringparam - Set ring sizes * @netdev: Network interface device structure * @ring: Ring param structure + * @kernel_ring: Ring external param structure + * @extack: netlink handle * Returns * 0: Successful. * Negative value: Failed. */ static int pch_gbe_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_tx_ring *txdr, *tx_old; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 71d234291fc5..1dc40c537281 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -210,9 +210,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ - return -EINVAL; - /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c index e1a304886a3c..4c7e0c991105 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c @@ -69,7 +69,9 @@ pasemi_mac_ethtool_set_msglevel(struct net_device *netdev, static void pasemi_mac_ethtool_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct pasemi_mac *mac = netdev_priv(netdev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index c54d735b9e2e..386a5cf1e224 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -512,7 +512,9 @@ static int ionic_set_coalesce(struct net_device *netdev, } static void ionic_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ionic_lif *lif = netdev_priv(netdev); @@ -523,7 +525,9 @@ static void ionic_get_ringparam(struct net_device *netdev, } static int ionic_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct 
ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic_queue_params qparam; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 63f8a8163b5f..2ff7be17e5af 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3135,7 +3135,7 @@ int ionic_lif_init(struct ionic_lif *lif) return -EINVAL; } - lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); + lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL); if (!lif->dbid_inuse) { dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index a075643f5826..3c4a84ea6321 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -392,7 +392,9 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, static void netxen_nic_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(dev); @@ -430,7 +432,9 @@ netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) static int netxen_nic_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(dev); u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 452494f8c298..65e20693c549 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1036,12 +1036,12 @@ static void qed_cid_map_free(struct qed_hwfn *p_hwfn) u32 type, vf; for (type = 0; type < MAX_CONN_TYPES; type++) { - kfree(p_mngr->acquired[type].cid_map); + bitmap_free(p_mngr->acquired[type].cid_map); p_mngr->acquired[type].max_count = 0; p_mngr->acquired[type].start_cid = 0; for (vf = 0; vf < MAX_NUM_VFS; vf++) { - kfree(p_mngr->acquired_vf[type][vf].cid_map); + bitmap_free(p_mngr->acquired_vf[type][vf].cid_map); p_mngr->acquired_vf[type][vf].max_count = 0; p_mngr->acquired_vf[type][vf].start_cid = 0; } @@ -1054,15 +1054,10 @@ qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn, u32 cid_start, u32 cid_count, struct qed_cid_acquired_map *p_map) { - u32 size; - if (!cid_count) return 0; - size = DIV_ROUND_UP(cid_count, - sizeof(unsigned long) * BITS_PER_BYTE) * - sizeof(unsigned long); - p_map->cid_map = kzalloc(size, GFP_KERNEL); + p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL); if (!p_map->cid_map) return -ENOMEM; @@ -1216,7 +1211,6 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) struct qed_cid_acquired_map *p_map; struct qed_conn_type_cfg *p_cfg; int type; - u32 len; /* Reset acquired cids */ for (type = 0; type < MAX_CONN_TYPES; type++) { @@ -1225,11 +1219,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) p_cfg = &p_mngr->conn_cfg[type]; if (p_cfg->cid_count) { p_map = &p_mngr->acquired[type]; - len = DIV_ROUND_UP(p_map->max_count, - sizeof(unsigned long) * - BITS_PER_BYTE) * - sizeof(unsigned long); - memset(p_map->cid_map, 0, len); + 
bitmap_zero(p_map->cid_map, p_map->max_count); } if (!p_cfg->cids_per_vf) @@ -1237,11 +1227,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) for (vf = 0; vf < MAX_NUM_VFS; vf++) { p_map = &p_mngr->acquired_vf[type][vf]; - len = DIV_ROUND_UP(p_map->max_count, - sizeof(unsigned long) * - BITS_PER_BYTE) * - sizeof(unsigned long); - memset(p_map->cid_map, 0, len); + bitmap_zero(p_map->cid_map, p_map->max_count); } } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index f2cedbd9489c..ed1a84542ad2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2721,6 +2721,25 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); #define NUM_STORMS 6 /** + * qed_get_protocol_type_str(): Get a string for Protocol type. + * + * @protocol_type: Protocol type (using enum protocol_type). + * + * Return: String. + */ +const char *qed_get_protocol_type_str(u32 protocol_type); + +/** + * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID. + * + * @protocol_type: Protocol type (using enum protocol_type). + * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id). + * + * Return: String. + */ +const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id); + +/** * qed_set_rdma_error_level(): Sets the RDMA assert level. * If the severity of the error will be * above the level, the FW will assert. diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 321c43408153..0ce37f2460a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -210,6 +210,82 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ XSTORM_PQ_INFO_OFFSET(pq_id)) +static const char * const s_protocol_types[] = { + "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE", + "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP", + "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON", + "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI", +}; + +static const char *s_ramrod_cmd_ids[][28] = { + { + "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC", + "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC", + "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN", + "ISCSI_RAMROD_CMD_ID_UPDATE_CONN", + "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN", + "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE", + "ISCSI_RAMROD_CMD_ID_CONN_STATS", }, + { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC", + "FCOE_RAMROD_CMD_ID_STAT_FUNC", + "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN", + "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", }, + { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", + "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", + "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", + "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", + "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", + "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", + "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP", + "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP", + "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP", + "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE", + "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP", + "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP", + "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP", + "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", 
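/* Annotation: these string tables back the qed_get_protocol_type_str() and
 * qed_get_ramrod_cmd_id_str() helpers declared in qed_hsi.h; every lookup
 * is guarded by ARRAY_SIZE() plus a NULL check so a bogus firmware value
 * degrades into a readable fallback instead of an out-of-bounds read.
 * Generic sketch of the pattern, with illustrative names:
 */
static const char * const type_names[] = { "TYPE_A", "TYPE_B", "TYPE_C" };

static const char *type_name_str(unsigned int type)
{
        if (type >= ARRAY_SIZE(type_names) || !type_names[type])
                return "Invalid type";

        return type_names[type];
}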
"ROCE_RAMROD_FLUSH_DPT_QP", }, + { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START", + "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP", + "CORE_RAMROD_TX_QUEUE_STOP", + "CORE_RAMROD_RX_QUEUE_FLUSH", + "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", }, + { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START", + "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP", + "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP", + "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP", + "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE", + "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION", + "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER", + "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER", + "ETH_RAMROD_RX_ADD_UDP_FILTER", + "ETH_RAMROD_RX_DELETE_UDP_FILTER", + "ETH_RAMROD_RX_CREATE_GFT_ACTION", + "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE", + "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL", + "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL", + "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", }, + { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", + "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", + "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", + "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", + "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", + "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", + "RDMA_RAMROD_STOP_NS_TRACKING", + "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD", + "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD", + "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR", + "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP", + "IWARP_RAMROD_CMD_ID_MODIFY_QP", + "IWARP_RAMROD_CMD_ID_DESTROY_QP", + "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", }, + { NULL }, /*TOE*/ + { NULL }, /*PREROCE*/ + { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START", + "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START", + "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE", + "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", } +}; + /******************** INTERNAL IMPLEMENTATION *********************/ /* Returns the external VOQ number */ @@ -1647,6 +1723,32 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } +const char *qed_get_protocol_type_str(u32 protocol_type) +{ + if (protocol_type >= ARRAY_SIZE(s_protocol_types)) + return "Invalid protocol type"; + + return s_protocol_types[protocol_type]; +} + +const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id) +{ + const char *ramrod_cmd_id_str; + + if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids)) + return "Invalid protocol type"; + + if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0])) + return "Invalid Ramrod command ID"; + + ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id]; + + if (!ramrod_cmd_id_str) + return "Invalid Ramrod command ID"; + + return ramrod_cmd_id_str; +} + static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) { switch (storm_id) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index a97f691839e0..82e74f62b677 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1045,7 +1045,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, if (!parities) continue; - for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; if (qed_int_is_parity_flag(p_hwfn, p_bit) && @@ -1083,7 +1083,7 @@ 
static int qed_int_deassertion(struct qed_hwfn *p_hwfn, * to current group, making them responsible for the * previous assertion. */ - for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { long unsigned int bitmask; u8 bit, bit_len; @@ -1382,7 +1382,7 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); for (i = 0; i < NUM_ATTN_REGS; i++) { /* j is array index, k is bit index */ - for (j = 0, k = 0; k < 32; j++) { + for (j = 0, k = 0; k < 32 && j < 32; j++) { struct aeu_invert_reg_bit *p_aeu; p_aeu = &aeu_descs[i].bits[j]; @@ -2399,3 +2399,25 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } + +int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info) +{ + u16 sbid = p_sb->igu_sb_id; + u32 i; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (sbid >= NUM_OF_SBS(p_hwfn->cdev)) + return -EINVAL; + + p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4); + p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4); + + for (i = 0; i < PIS_PER_SB; i++) + p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 84c17e97f569..7e5127f61744 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -186,6 +186,19 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); /** + * qed_int_get_sb_dbg: Read debug information regarding a given SB + * + * @p_hwfn: hw function pointer + * @p_ptt: ptt resource + * @p_sb: pointer to status block for which we want to get info + * @p_info: pointer to struct to fill with information regarding SB + * + * Return: 0 with status block info filled on success, otherwise return error + */ +int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info); + +/** * qed_db_rec_handler(): Doorbell Recovery handler. * Run doorbell recovery in case of PF overflow (and flush DORQ if * needed). diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 7673b3e07736..c5003fa1a25e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -255,27 +255,6 @@ static void __exit qed_exit(void) } module_exit(qed_exit); -/* Check if the DMA controller on the machine can properly handle the DMA - * addressing required by the device. 
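/* Annotation: the qed_main.c change just below replaces the open-coded
 * 64-bit-then-32-bit coherency-mask fallback with a single
 * dma_set_mask_and_coherent() call and a plain error exit. Sketch of the
 * modern idiom in a PCI probe path; the error label is illustrative:
 */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto err_disable_device;
        }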
- */ -static int qed_set_coherency_mask(struct qed_dev *cdev) -{ - struct device *dev = &cdev->pdev->dev; - - if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { - if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { - DP_NOTICE(cdev, - "Can't request 64-bit consistent allocations\n"); - return -EIO; - } - } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { - DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); - return -EIO; - } - - return 0; -} - static void qed_free_pci(struct qed_dev *cdev) { struct pci_dev *pdev = cdev->pdev; @@ -351,9 +330,12 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) if (IS_PF(cdev) && !cdev->pci_params.pm_cap) DP_NOTICE(cdev, "Cannot find power management capability\n"); - rc = qed_set_coherency_mask(cdev); - if (rc) + rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + DP_NOTICE(cdev, "Can't request DMA addresses\n"); + rc = -EIO; goto err2; + } cdev->pci_params.mem_start = pci_resource_start(pdev, 0); cdev->pci_params.mem_end = pci_resource_end(pdev, 0); @@ -447,7 +429,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->wol_support = true; dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); - + dev_info->esl = qed_mcp_is_esl_supported(p_hwfn); dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, @@ -2936,6 +2918,30 @@ out: return status; } +static int +qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, + u16 qid, struct qed_sb_info_dbg *sb_dbg) +{ + struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; + struct qed_ptt *ptt; + int rc; + + if (IS_VF(cdev)) + return -EINVAL; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Can't acquire PTT\n"); + return -EAGAIN; + } + + memset(sb_dbg, 0, sizeof(*sb_dbg)); + rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); + + qed_ptt_release(hwfn, ptt); + return rc; +} + static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len) { @@ -2978,11 +2984,54 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) return rc; } +static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) 
+{ + char buf[QED_MFW_REPORT_STR_SIZE]; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + va_list vl; + + va_start(vl, fmt); + vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl); + va_end(vl); + + if (IS_PF(cdev)) { + p_hwfn = QED_LEADING_HWFN(cdev); + p_ptt = qed_ptt_acquire(p_hwfn); + if (p_ptt) { + qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf)); + qed_ptt_release(p_hwfn, p_ptt); + } + } +} + static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) { return QED_AFFIN_HWFN_IDX(cdev); } +static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + *esl_active = false; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, @@ -3038,6 +3087,9 @@ const struct qed_common_ops qed_common_ops_pass = { .read_nvm_cfg = &qed_nvm_flash_cfg_read, .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, .set_grc_config = &qed_set_grc_config, + .mfw_report = &qed_mfw_report, + .get_sb_info = &qed_get_sb_info, + .get_esl_status = &qed_get_esl_status, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 64678a256f3b..da1eadabcb41 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -4158,3 +4158,25 @@ qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, return qed_mcp_send_debug_data(p_hwfn, p_ptt, QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size); } + +bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK); +} + +int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, ¶m); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc); + return rc; + } + + *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 564723800d15..369e1892450a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -15,6 +15,8 @@ #include "qed_hsi.h" #include "qed_dev_api.h" +#define QED_MFW_REPORT_STR_SIZE 256 + struct qed_mcp_link_speed_params { bool autoneg; @@ -1337,4 +1339,24 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 len); + +/** + * qed_mcp_is_esl_supported(): Return whether management firmware support ESL or not. 
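/* Annotation: qed_mfw_report() above is a printf-style wrapper: __printf(2, 3)
 * lets the compiler type-check callers' format strings, and vsnprintf()
 * renders into a bounded buffer before the text is forwarded. Generic
 * sketch of the wrapper shape; struct dev_ctx and deliver_report() are
 * illustrative stand-ins, not qed symbols:
 */
static __printf(2, 3) void report(struct dev_ctx *ctx, const char *fmt, ...)
{
        char buf[256];
        va_list vl;

        va_start(vl, fmt);
        vsnprintf(buf, sizeof(buf), fmt, vl);   /* always NUL-terminated */
        va_end(vl);

        deliver_report(ctx, buf, strlen(buf));  /* illustrative sink */
}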
+ * + * @p_hwfn: hw function pointer + * + * Return: true if esl is supported, otherwise return false + */ +bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_get_esl_status(): Get enhanced system lockdown status + * + * @p_hwfn: hw function pointer + * @p_ptt: ptt resource pointer + * @active: ESL active status data pointer + * + * Return: 0 with esl status info on success, otherwise return error + */ +int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h index 8a0e3c5d4bda..b70ee8200e15 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h @@ -1191,6 +1191,7 @@ enum drv_msg_code_enum { DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001), DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002), DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004), + DRV_MSG_CODE_GET_MANAGEMENT_STATUS = DRV_MSG_CODE(0xc007), }; #define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 6f1a52e6beb2..b5e35f433a20 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -550,6 +550,8 @@ 0x1 << 1) #define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \ 0x1 << 0) +#define IGU_REG_PRODUCER_MEMORY 0x182000UL +#define IGU_REG_CONSUMER_MEM 0x183000UL #define IGU_REG_MAPPING_MEMORY \ 0x184000UL #define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 648176dfb871..3b54da963554 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -85,10 +85,12 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, goto err; } - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n", - opaque_cid, cmd, protocol, - (unsigned long)&p_ent->ramrod, + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n", + opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd), + cmd, qed_get_protocol_type_str(protocol), protocol, + (unsigned long long)(uintptr_t)&p_ent->ramrod, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index e0473729b161..d01b9245f811 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -139,10 +139,13 @@ err: if (!p_ptt) return -EBUSY; qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL, - "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n", + "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n", le32_to_cpu(p_ent->elem.hdr.cid), + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), p_ent->elem.hdr.cmd_id, - p_ent->elem.hdr.protocol_id, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id, le16_to_cpu(p_ent->elem.hdr.echo)); qed_ptt_release(p_hwfn, p_ptt); @@ -170,13 +173,16 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, return -EINVAL; } - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: 
%s\n", + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n", p_ent->elem.hdr.cid, + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), p_ent->elem.hdr.cmd_id, - p_ent->elem.hdr.protocol_id, - p_ent->elem.data_ptr.hi, - p_ent->elem.data_ptr.lo, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id, + p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); @@ -271,17 +277,27 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, { qed_spq_async_comp_cb cb; - if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) + if (!p_hwfn->p_spq) return -EINVAL; + if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) { + DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n", + qed_get_protocol_type_str(p_eqe->protocol_id), + p_eqe->protocol_id); + + return -EINVAL; + } + cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; if (cb) { return cb(p_hwfn, p_eqe->opcode, p_eqe->echo, &p_eqe->data, p_eqe->fw_return_code); } else { DP_NOTICE(p_hwfn, - "Unknown Async completion for protocol: %d\n", + "Unknown Async completion for %s:%d\n", + qed_get_protocol_type_str(p_eqe->protocol_id), p_eqe->protocol_id); + return -EINVAL; } } @@ -830,8 +846,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->recov_in_prog) { DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n", - p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id); + "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n", + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), + p_ent->elem.hdr.cmd_id, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id); /* Let the flow complete w/o any error handling */ qed_spq_recov_set_ret_code(p_ent, fw_return_code); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 8284c4c1528f..97a7ab0826ed 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -168,6 +168,8 @@ enum { QEDE_PRI_FLAG_CMT, QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */ QEDE_PRI_FLAG_RECOVER_ON_ERROR, + QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */ + QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */ QEDE_PRI_FLAG_LEN, }; @@ -175,6 +177,8 @@ static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Coupled-Function", "SmartAN capable", "Recover on error", + "ESL capable", + "ESL active", }; enum qede_ethtool_tests { @@ -478,6 +482,7 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) static u32 qede_get_priv_flags(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); + bool esl_active; u32 flags = 0; if (edev->dev_info.common.num_hwfns > 1) @@ -489,6 +494,14 @@ static u32 qede_get_priv_flags(struct net_device *dev) if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE)) flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR); + if (edev->dev_info.common.esl) + flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT); + + edev->ops->common->get_esl_status(edev->cdev, &esl_active); + + if (esl_active) + flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE); + return flags; } @@ -888,7 +901,9 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, } static void qede_get_ringparam(struct 
net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct qede_dev *edev = netdev_priv(dev); @@ -899,7 +914,9 @@ static void qede_get_ringparam(struct net_device *dev, } static int qede_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct qede_dev *edev = netdev_priv(dev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 065e9004598e..b242000a77fd 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -10,6 +10,7 @@ #include <linux/bpf_trace.h> #include <net/udp_tunnel.h> #include <linux/ip.h> +#include <net/gro.h> #include <net/ipv6.h> #include <net/tcp.h> #include <linux/if_ether.h> @@ -1152,7 +1153,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, qede_rx_bd_ring_consume(rxq); break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(edev->ndev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(edev->ndev, prog, act); @@ -1643,6 +1644,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) data_split = true; } } else { + if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) { + DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len); + qede_free_failed_tx_pkt(txq, first_bd, 0, false); + qede_update_tx_producer(txq); + return NETDEV_TX_OK; + } + val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 06c6a5813606..b4e5a15e308b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -509,34 +509,95 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return 0; } -static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq) +static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp) { + char *p_sb = (char *)fp->sb_info->sb_virt; + u32 sb_size, i; + + sb_size = sizeof(struct status_block); + + for (i = 0; i < sb_size; i += 8) + DP_NOTICE(edev, + "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n", + p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3], + p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]); +} + +static void +qede_txq_fp_log_metadata(struct qede_dev *edev, + struct qede_fastpath *fp, struct qede_tx_queue *txq) +{ + struct qed_chain *p_chain = &txq->tx_pbl; + + /* Dump txq/fp/sb ids etc. 
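/* Annotation: the qede_main.c TX-timeout rework surrounding this point splits
 * the dump into helpers and walks every TX queue rather than trusting the
 * reported queue index alone, logging ring metadata for all queues and a
 * deeper state dump only where producer and consumer disagree. Sketch of
 * that handler shape; the foo_* names are illustrative:
 */
static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct foo_dev *fdev = netdev_priv(dev);
        int i;

        netif_carrier_off(dev);
        netdev_err(dev, "TX timeout on queue %u\n", txqueue);

        for (i = 0; i < fdev->num_txq; i++) {
                struct foo_txq *txq = &fdev->txq[i];

                foo_log_txq_metadata(dev, txq);         /* ids, geometry */
                if (txq->cons_idx != txq->prod_idx)     /* work is stuck */
                        foo_log_txq_state(dev, txq);
        }
}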
other metadata */ DP_NOTICE(edev, - "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n", - txq->index, le16_to_cpu(*txq->hw_cons_ptr), - qed_chain_get_cons_idx(&txq->tx_pbl), - qed_chain_get_prod_idx(&txq->tx_pbl), - jiffies); + "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n", + fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos, + p_chain, p_chain->capacity, p_chain->size, jiffies, HZ); + + /* Dump all the relevant prod/cons indexes */ + DP_NOTICE(edev, + "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n", + le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons, + qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain)); +} + +static void +qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq) +{ + struct qed_sb_info_dbg sb_dbg; + int rc; + + /* sb info */ + qede_fp_sb_dump(edev, fp); + + memset(&sb_dbg, 0, sizeof(sb_dbg)); + rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg); + + DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n", + sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]); + + /* report to mfw */ + edev->ops->common->mfw_report(edev->cdev, + "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n", + txq->index, le16_to_cpu(*txq->hw_cons_ptr), + qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl), jiffies); + if (!rc) + edev->ops->common->mfw_report(edev->cdev, + "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n", + txq->index, fp->sb_info->igu_sb_id, + sb_dbg.igu_prod, sb_dbg.igu_cons, + sb_dbg.pi[TX_PI(txq->cos)]); } static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct qede_dev *edev = netdev_priv(dev); - struct qede_tx_queue *txq; - int cos; + int i; netif_carrier_off(dev); DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue); - if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX)) - return; + for_each_queue(i) { + struct qede_tx_queue *txq; + struct qede_fastpath *fp; + int cos; - for_each_cos_in_txq(edev, cos) { - txq = &edev->fp_array[txqueue].txq[cos]; + fp = &edev->fp_array[i]; + if (!(fp->type & QEDE_FASTPATH_TX)) + continue; - if (qed_chain_get_cons_idx(&txq->tx_pbl) != - qed_chain_get_prod_idx(&txq->tx_pbl)) - qede_tx_log_print(edev, txq); + for_each_cos_in_txq(edev, cos) { + txq = &fp->txq[cos]; + + /* Dump basic metadata for all queues */ + qede_txq_fp_log_metadata(edev, fp, txq); + + if (qed_chain_get_cons_idx(&txq->tx_pbl) != + qed_chain_get_prod_idx(&txq->tx_pbl)) + qede_tx_log_print(edev, fp, txq); + } } if (IS_VF(edev)) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 8c28fabb0ff6..39176e765767 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -304,11 +304,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n", config.tx_type, config.rx_filter); - if (config.flags) { - DP_ERR(edev, "config.flags is reserved for future use\n"); - return -EINVAL; - } - ptp->hw_ts_ioctl_called = 1; ptp->tx_type = config.tx_type; ptp->rx_filter = config.rx_filter; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 1e6d72adfe43..b30589a135c2 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ 
b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev) spin_lock_irqsave(&qdev->hw_lock, hw_flags); - err = ql_wait_for_drvr_lock(qdev); - if (err) { - err = ql_adapter_initialize(qdev); - if (err) { - netdev_err(ndev, "Unable to initialize adapter\n"); - goto err_init; - } - netdev_err(ndev, "Releasing driver lock\n"); - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); - } else { + if (!ql_wait_for_drvr_lock(qdev)) { netdev_err(ndev, "Could not acquire driver lock\n"); + err = -ENODEV; goto err_lock; } + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); + goto err_init; + } + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); set_bit(QL_ADAPTER_UP, &qdev->flags); @@ -3751,7 +3750,7 @@ static int ql3xxx_probe(struct pci_dev *pdev, struct net_device *ndev = NULL; struct ql3_adapter *qdev = NULL; static int cards_found; - int pci_using_dac, err; + int err; err = pci_enable_device(pdev); if (err) { @@ -3767,11 +3766,7 @@ static int ql3xxx_probe(struct pci_dev *pdev, pci_set_master(pdev); - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) - pci_using_dac = 1; - else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) - pci_using_dac = 0; - + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { pr_err("%s no usable DMA configuration\n", pci_name(pdev)); goto err_out_free_regions; @@ -3798,8 +3793,7 @@ static int ql3xxx_probe(struct pci_dev *pdev, qdev->msg_enable = netif_msg_init(debug, default_msg); - if (pci_using_dac) - ndev->features |= NETIF_F_HIGHDMA; + ndev->features |= NETIF_F_HIGHDMA; if (qdev->device_id == QL3032_DEVICE_ID) ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index be7abee160e7..b25102fded7b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1698,7 +1698,7 @@ int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port); int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); -int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); +int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *); void qlcnic_set_netdev_features(struct qlcnic_adapter *, struct qlcnic_esw_func_cfg *); void qlcnic_sriov_vf_set_multi(struct net_device *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index d51bac7ba5af..bd0607680329 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) sds_mbx_size = sizeof(struct qlcnic_sds_mbx); context_id = recv_ctx->context_id; num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; - ahw->hw_ops->alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_ADD_RCV_RINGS); + err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_ADD_RCV_RINGS); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to alloc mbx args %d\n", err); + return err; + } + cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); /* set up status rings, mbx 2-81 */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 6f1d9c1fd1b0..23cd47d588e5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -609,7 +609,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *); int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); -int qlcnic_83xx_init(struct qlcnic_adapter *, int); +int qlcnic_83xx_init(struct qlcnic_adapter *); int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *); void qlcnic_83xx_idc_poll_dev_state(struct work_struct *); void qlcnic_83xx_idc_exit(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 27dffa299ca6..dbb800769cb6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2432,7 +2432,7 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter) qlcnic_set_sds_ring_count(adapter, rx_cnt); } -int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) +int qlcnic_83xx_init(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err = 0; @@ -2466,7 +2466,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) goto exit; if (qlcnic_sriov_vf_check(adapter)) { - err = qlcnic_sriov_vf_init(adapter, pci_using_dac); + err = qlcnic_sriov_vf_init(adapter); if (err) goto detach_mbx; else diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index fc364b4ab6eb..e10fe071a40f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -632,7 +632,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, static void qlcnic_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(dev); @@ -663,7 +665,9 @@ qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) static int qlcnic_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(dev); u16 num_rxd, num_jumbo_rxd, num_txd; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ed84f0f97623..d320567b2cca 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2258,8 +2258,7 @@ static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, } int -qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, - int pci_using_dac) +qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev) { int err; struct pci_dev *pdev = adapter->pdev; @@ -2278,20 +2277,15 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO | - NETIF_F_HW_VLAN_CTAG_RX); + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA); netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | - 
NETIF_F_IPV6_CSUM); + NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA); if (QLCNIC_IS_TSO_CAPABLE(adapter)) { netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6); netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); } - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } - if (qlcnic_vlan_tx_check(adapter)) netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX); @@ -2341,20 +2335,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, return 0; } -static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac) -{ - if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) - *pci_using_dac = 1; - else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) - *pci_using_dac = 0; - else { - dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); - return -EIO; - } - - return 0; -} - void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter) { int ring; @@ -2441,8 +2421,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev = NULL; struct qlcnic_adapter *adapter = NULL; struct qlcnic_hardware_context *ahw; - int err, pci_using_dac = -1; char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ + int err; err = pci_enable_device(pdev); if (err) @@ -2453,9 +2433,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_pdev; } - err = qlcnic_set_dma_mask(pdev, &pci_using_dac); - if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); goto err_out_disable_pdev; + } err = pci_request_regions(pdev, qlcnic_driver_name); if (err) @@ -2569,7 +2551,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; - err = qlcnic_83xx_init(adapter, pci_using_dac); + err = qlcnic_83xx_init(adapter); if (err) { switch (err) { case -ENOTRECOVERABLE: @@ -2633,7 +2615,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (adapter->portnum == 0) qlcnic_set_drv_version(adapter); - err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); + err = qlcnic_setup_netdev(adapter, netdev); if (err) goto err_out_disable_mbx_intr; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 7160b42f51dd..c42b99cd58bd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -188,7 +188,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *, int); void qlcnic_sriov_cleanup(struct qlcnic_adapter *); void __qlcnic_sriov_cleanup(struct qlcnic_adapter *); void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *); -int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int); +int qlcnic_sriov_vf_init(struct qlcnic_adapter *); void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *); int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8); void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32); @@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *, struct qlcnic_info *, u16); int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8); void qlcnic_sriov_free_vlans(struct qlcnic_adapter *); -void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *); +int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *); bool qlcnic_sriov_check_any_vlan(struct 
qlcnic_vf_info *); void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *, struct qlcnic_vf_info *, u16); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index dd03be3fc82a..9282321c2e7f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; - int i, num_vlans; + int i, num_vlans, ret; u16 *vlans; if (sriov->allowed_vlans) @@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n", sriov->num_allowed_vlans); - qlcnic_sriov_alloc_vlans(adapter); + ret = qlcnic_sriov_alloc_vlans(adapter); + if (ret) + return ret; if (!sriov->any_vlan) return 0; @@ -523,8 +525,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, - int pci_using_dac) +static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter) { int err; @@ -569,7 +570,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, if (err) goto err_out_send_channel_term; - err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); + err = qlcnic_setup_netdev(adapter, adapter->netdev); if (err) goto err_out_send_channel_term; @@ -612,7 +613,7 @@ static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter) return 0; } -int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) +int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; int err; @@ -629,7 +630,7 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (err) return err; - err = qlcnic_sriov_setup_vf(adapter, pci_using_dac); + err = qlcnic_sriov_setup_vf(adapter); if (err) return err; @@ -2154,7 +2155,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) return err; } -void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) +int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf; @@ -2164,7 +2165,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) vf = &sriov->vf_info[i]; vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans, sizeof(*vf->sriov_vlans), GFP_KERNEL); + if (!vf->sriov_vlans) + return -ENOMEM; } + + return 0; } void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 447720b93e5a..e90fa97c0ae6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, if (err) goto del_flr_queue; - qlcnic_sriov_alloc_vlans(adapter); + err = qlcnic_sriov_alloc_vlans(adapter); + if (err) + goto del_flr_queue; return err; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index f72e13b83869..f502db9cdea9 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c @@ -133,7 +133,9 @@ static int emac_nway_reset(struct 
net_device *netdev) } static void emac_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct emac_adapter *adpt = netdev_priv(netdev); @@ -144,7 +146,9 @@ static void emac_get_ringparam(struct net_device *netdev, } static int emac_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct emac_adapter *adpt = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index d59fff2fbcc6..792ce9a323cd 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -246,7 +246,9 @@ qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) } static void -qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) +qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct qcaspi *qca = netdev_priv(dev); @@ -257,7 +259,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) } static int -qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) +qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; struct qcaspi *qca = netdev_priv(dev); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 4f39f843bb3a..ad7b9e9d7f95 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1388,7 +1388,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info } static void cp_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { ring->rx_max_pending = CP_RX_RING_SIZE; ring->tx_max_pending = CP_TX_RING_SIZE; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index bbe21db20417..19e2621e0645 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -615,6 +615,7 @@ struct rtl8169_private { struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ u16 cp_cmd; u32 irq_mask; + int irq; struct clk *clk; struct { @@ -1873,7 +1874,9 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) } static void rtl8169_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *data) + struct ethtool_ringparam *data, + struct kernel_ethtool_ringparam *kernel_data, + struct netlink_ext_ack *extack) { data->rx_max_pending = NUM_RX_DESC; data->rx_pending = NUM_RX_DESC; @@ -1969,8 +1972,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, /* 8125A family. */ - { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, - { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 }, + /* It seems only XID 609 made it to the mass market. 
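/* Annotation: the r8169 entries being commented out around here belong to a
 * { mask, value, version } match table: the XID read from the chip is ANDed
 * with each mask and compared to the value, and the first hit wins, so
 * retiring an entry just removes one pattern from the scan. Minimal sketch
 * of that table-driven lookup, with illustrative names:
 */
struct chip_match { u16 mask, val; int ver; };

static int xid_to_version(const struct chip_match *tbl, size_t n, u16 xid)
{
        size_t i;

        for (i = 0; i < n; i++)
                if ((xid & tbl[i].mask) == tbl[i].val)
                        return tbl[i].ver;

        return -1;      /* unknown chip */
}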
+ * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, + * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, + */ /* RTL8117 */ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, @@ -1978,17 +1984,26 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168EP family. */ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, - { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, - { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. + * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, + */ /* 8168H family. */ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, - { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. + * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, + */ /* 8168G family. */ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, - { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + /* It seems this chip version never made it to + * the wild. Let's disable detection. + * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + */ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, /* 8168F family. */ @@ -4698,7 +4713,7 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); - free_irq(pci_irq_vector(pdev, 0), tp); + free_irq(tp->irq, tp); phy_disconnect(tp->phydev); @@ -4719,7 +4734,7 @@ static void rtl8169_netpoll(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp); + rtl8169_interrupt(tp->irq, tp); } #endif @@ -4753,8 +4768,7 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED; - retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, - irqflags, dev->name, tp); + retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, dev->name, tp); if (retval < 0) goto err_release_fw_2; @@ -4771,7 +4785,7 @@ out: return retval; err_free_irq: - free_irq(pci_irq_vector(pdev, 0), tp); + free_irq(tp->irq, tp); err_release_fw_2: rtl_release_firmware(tp); rtl8169_rx_clear(tp); @@ -5217,8 +5231,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp) static void rtl_init_mac_address(struct rtl8169_private *tp) { + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; struct net_device *dev = tp->dev; - u8 mac_addr[ETH_ALEN]; int rc; rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr); @@ -5233,7 +5247,8 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) if (is_valid_ether_addr(mac_addr)) goto done; - eth_hw_addr_random(dev); + eth_random_addr(mac_addr); + dev->addr_assign_type = NET_ADDR_RANDOM; dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n"); done: eth_hw_addr_set(dev, mac_addr); @@ -5271,12 +5286,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - /* Disable ASPM L1 as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users. - */ - rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); - tp->aspm_manageable = !rc; - /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { @@ -5319,6 +5328,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->mac_version = chipset; + /* Disable ASPM L1 as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. 
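/* Annotation: rtl_init_mac_address() earlier in this hunk now assembles the
 * address in a local 2-byte-aligned buffer and only flags NET_ADDR_RANDOM
 * when the random fallback is actually taken, committing to the netdev once
 * at the end. Sketch of that ordering; read_hw_mac() is an illustrative
 * stand-in for the driver's address sources:
 */
        u8 mac_addr[ETH_ALEN] __aligned(2) = {};

        if (read_hw_mac(tp, mac_addr) || !is_valid_ether_addr(mac_addr)) {
                eth_random_addr(mac_addr);      /* locally administered */
                dev->addr_assign_type = NET_ADDR_RANDOM;
        }
        eth_hw_addr_set(dev, mac_addr);         /* single commit point */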
+ * Chips from RTL8168h partially have issues with L1.2, but seem + * to work fine with L1 and L1.1. + */ + if (tp->mac_version >= RTL_GIGA_MAC_VER_45) + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + else + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); + tp->aspm_manageable = !rc; + tp->dash_type = rtl_check_dash(tp); tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK; @@ -5340,6 +5360,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "Can't allocate interrupt\n"); return rc; } + tp->irq = pci_irq_vector(pdev, 0); INIT_WORK(&tp->wk.work, rtl_task); @@ -5374,12 +5395,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (rtl_chip_supports_csum_v2(tp)) { dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; - dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; - dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; + netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V2); + netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V2); } else { dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; - dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; - dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; + netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V1); + netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V1); } dev->hw_features |= NETIF_F_RXALL; @@ -5415,8 +5436,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", - rtl_chip_infos[chipset].name, dev->dev_addr, xid, - pci_irq_vector(pdev, 0)); + rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq); if (jumbo_max) netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", @@ -5440,7 +5460,9 @@ static struct pci_driver rtl8169_pci_driver = { .probe = rtl_init_one, .remove = rtl_remove_one, .shutdown = rtl_shutdown, - .driver.pm = pm_ptr(&rtl8169_pm_ops), +#ifdef CONFIG_PM + .driver.pm = &rtl8169_pm_ops, +#endif }; module_pci_driver(rtl8169_pci_driver); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index b4c597f4040c..b215cde68e10 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -30,8 +30,7 @@ #include <linux/spinlock.h> #include <linux/sys_soc.h> #include <linux/reset.h> - -#include <asm/div64.h> +#include <linux/math64.h> #include "ravb.h" @@ -1605,7 +1604,9 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) } static void ravb_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ravb_private *priv = netdev_priv(ndev); @@ -1616,7 +1617,9 @@ static void ravb_get_ringparam(struct net_device *ndev, } static int ravb_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; @@ -2218,10 +2221,6 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) if (copy_from_user(&config, req->ifr_data, sizeof(config))) return -EFAULT; - /* Reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: tstamp_tx_ctrl = 0; @@ -2488,8 +2487,7 @@ static int ravb_set_gti(struct net_device *ndev) if (!rate) return -EINVAL; - inc = 
1000000000ULL << 20; - do_div(inc, rate); + inc = div64_ul(1000000000ULL << 20, rate); if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a3fbb2221c9a..d947a628e166 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2296,7 +2296,9 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) } static void sh_eth_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2307,7 +2309,9 @@ static void sh_eth_get_ringparam(struct net_device *ndev, } static int sh_eth_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct sh_eth_private *mdp = netdev_priv(ndev); int ret; @@ -3364,8 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* MDIO bus init */ ret = sh_mdio_init(mdp, pd); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "MDIO init failed: %d\n", ret); + dev_err_probe(&pdev->dev, ret, "MDIO init failed\n"); goto out_release; } diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index ba4062881eed..3fcea211716c 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1995,17 +1995,6 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, return err ? -EOPNOTSUPP : 0; } -static int rocker_port_change_proto_down(struct net_device *dev, - bool proto_down) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - if (rocker_port->dev->flags & IFF_UP) - rocker_port_set_enable(rocker_port, !proto_down); - rocker_port->dev->proto_down = proto_down; - return 0; -} - static void rocker_port_neigh_destroy(struct net_device *dev, struct neighbour *n) { @@ -2037,7 +2026,6 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_set_mac_address = rocker_port_set_mac_address, .ndo_change_mtu = rocker_port_change_mtu, .ndo_get_phys_port_name = rocker_port_get_phys_port_name, - .ndo_change_proto_down = rocker_port_change_proto_down, .ndo_neigh_destroy = rocker_port_neigh_destroy, .ndo_get_port_parent_id = rocker_port_get_port_parent_id, }; @@ -2882,19 +2870,10 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_pci_request_regions; } - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) { - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "dma_set_coherent_mask failed\n"); - goto err_pci_set_dma_mask; - } - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "dma_set_mask failed\n"); - goto err_pci_set_dma_mask; - } + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "dma_set_mask failed\n"); + goto err_pci_set_dma_mask; } if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 3e1ca7a8d029..bc70c6abd6a5 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ 
-2783,7 +2783,8 @@ static void ofdpa_fib4_abort(struct rocker *rocker) if (!ofdpa_port) continue; nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; - ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, + ofdpa_flow_tbl_del(ofdpa_port, + OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT, flow_entry); } spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c index 835c838b7dfa..5dba4125d953 100644 --- a/drivers/net/ethernet/sfc/ef100_ethtool.c +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -20,8 +20,11 @@ /* This is the maximum number of descriptor rings supported by the QDMA */ #define EFX_EF100_MAX_DMAQ_SIZE 16384UL -static void ef100_ethtool_get_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *ring) +static void +ef100_ethtool_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 6aa81229b68a..f79b14a119ae 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -609,6 +609,9 @@ static size_t ef100_update_stats(struct efx_nic *efx, ef100_common_stat_mask(mask); ef100_ethtool_stat_mask(mask); + if (!mc_stats) + return 0; + efx_nic_copy_stats(efx, mc_stats); efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask, stats, mc_stats, false); @@ -993,11 +996,11 @@ static int ef100_process_design_param(struct efx_nic *efx, return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN: nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE); - efx->net_dev->gso_max_size = nic_data->tso_max_payload_len; + netif_set_gso_max_size(efx->net_dev, nic_data->tso_max_payload_len); return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS: nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff); - efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs; + netif_set_gso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs); return 0; case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES: nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff); @@ -1122,7 +1125,7 @@ static int ef100_probe_main(struct efx_nic *efx) nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT; nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT; nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT; - net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT; + netif_set_gso_max_segs(net_dev, ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT); /* Read design parameters */ rc = ef100_check_design_params(efx); if (rc) { diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 6960a2fe2b53..302dc835ac3d 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -5,6 +5,7 @@ * Copyright 2005-2013 Solarflare Communications Inc. 
*/ +#include <linux/filter.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> @@ -709,7 +710,7 @@ static int efx_register_netdev(struct efx_nic *efx) if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) net_dev->priv_flags |= IFF_UNICAST_FLT; net_dev->ethtool_ops = &efx_ethtool_ops; - net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; + netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS); net_dev->min_mtu = EFX_MIN_MTU; net_dev->max_mtu = EFX_MAX_MTU; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 3dbea028b325..ead550ae2709 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -10,6 +10,7 @@ #include "net_driver.h" #include <linux/module.h> +#include <linux/filter.h> #include "efx_channels.h" #include "efx.h" #include "efx_common.h" @@ -818,11 +819,8 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) old_txq_entries = efx->txq_entries; efx->rxq_entries = rxq_entries; efx->txq_entries = txq_entries; - for (i = 0; i < efx->n_channels; i++) { - channel = efx->channel[i]; - efx->channel[i] = other_channel[i]; - other_channel[i] = channel; - } + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); /* Restart buffer table allocation */ efx->next_buffer_table = next_buffer_table; @@ -864,11 +862,8 @@ rollback: /* Swap back */ efx->rxq_entries = old_rxq_entries; efx->txq_entries = old_txq_entries; - for (i = 0; i < efx->n_channels; i++) { - channel = efx->channel[i]; - efx->channel[i] = other_channel[i]; - other_channel[i] = channel; - } + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); goto out; } diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index f187631b2c5c..af37c990217e 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -9,6 +9,7 @@ */ #include "net_driver.h" +#include <linux/filter.h> #include <linux/module.h> #include <linux/netdevice.h> #include <net/gre.h> diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index e002ce21788d..48506373721a 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -157,8 +157,11 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, return 0; } -static void efx_ethtool_get_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *ring) +static void +efx_ethtool_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct efx_nic *efx = netdev_priv(net_dev); @@ -168,8 +171,11 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev, ring->tx_pending = efx->txq_entries; } -static int efx_ethtool_set_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *ring) +static int +efx_ethtool_set_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct efx_nic *efx = netdev_priv(net_dev); u32 txq_entries; diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 314c9c69eb0e..60c595ef7589 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2267,7 +2267,7 @@ static int ef4_register_netdev(struct ef4_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = 
&ef4_netdev_ops; net_dev->ethtool_ops = &ef4_ethtool_ops; - net_dev->gso_max_segs = EF4_TSO_MAX_SEGS; + netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS); net_dev->min_mtu = EF4_MIN_MTU; net_dev->max_mtu = EF4_MAX_MTU; diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index 137e8a7aeaa1..907254b36663 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -637,8 +637,11 @@ static int ef4_ethtool_set_coalesce(struct net_device *net_dev, return 0; } -static void ef4_ethtool_get_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *ring) +static void +ef4_ethtool_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ef4_nic *efx = netdev_priv(net_dev); @@ -648,8 +651,11 @@ static void ef4_ethtool_get_ringparam(struct net_device *net_dev, ring->tx_pending = efx->txq_entries; } -static int ef4_ethtool_set_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *ring) +static int +ef4_ethtool_set_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct ef4_nic *efx = netdev_priv(net_dev); u32 txq_entries; diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 966f13e7475d..0c6cc2191369 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue) struct ef4_rx_page_state *state; unsigned index; + if (unlikely(!rx_queue->page_ring)) + return NULL; index = rx_queue->page_remove & rx_queue->page_ptr_mask; page = rx_queue->page_ring[index]; if (page == NULL) @@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel, { struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel); + if (unlikely(!rx_queue->page_ring)) + return; + do { ef4_recycle_rx_page(channel, rx_buf); rx_buf = ef4_rx_buf_next(rx_queue, rx_buf); @@ -728,7 +733,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx, efx->rx_bufs_per_page); rx_queue->page_ring = kcalloc(page_ring_size, sizeof(*rx_queue->page_ring), GFP_KERNEL); - rx_queue->page_ptr_mask = page_ring_size - 1; + if (!rx_queue->page_ring) + rx_queue->page_ptr_mask = 0; + else + rx_queue->page_ptr_mask = page_ring_size - 1; } void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue) diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index c4fe3c48ac46..899cc1671004 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -71,7 +71,6 @@ int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, u32 flags, u32 loopback_mode, u32 loopback_speed) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); - int rc; BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); @@ -80,9 +79,8 @@ int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); - rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), + return efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), NULL, 0, NULL); - return rc; } int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) diff --git a/drivers/net/ethernet/sfc/ptp.c 
b/drivers/net/ethernet/sfc/ptp.c index 797e51802ccb..f0ef515e2ade 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1765,9 +1765,6 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) { int rc; - if (init->flags) - return -EINVAL; - if ((init->tx_type != HWTSTAMP_TX_OFF) && (init->tx_type != HWTSTAMP_TX_ON)) return -ERANGE; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 606750938b89..2375cef577e4 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -338,7 +338,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, break; default: - bpf_warn_invalid_xdp_action(xdp_act); + bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act); efx_free_rx_buffers(rx_queue, rx_buf, 1); channel->n_rx_xdp_bad_drops++; trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 68fc7d317693..633ca77a26fd 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) unsigned int index; struct page *page; + if (unlikely(!rx_queue->page_ring)) + return NULL; index = rx_queue->page_remove & rx_queue->page_ptr_mask; page = rx_queue->page_ring[index]; if (page == NULL) @@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel, { struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + if (unlikely(!rx_queue->page_ring)) + return; + do { efx_recycle_rx_page(channel, rx_buf); rx_buf = efx_rx_buf_next(rx_queue, rx_buf); @@ -150,7 +155,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) efx->rx_bufs_per_page); rx_queue->page_ring = kcalloc(page_ring_size, sizeof(*rx_queue->page_ring), GFP_KERNEL); - rx_queue->page_ptr_mask = page_ring_size - 1; + if (!rx_queue->page_ring) + rx_queue->page_ptr_mask = 0; + else + rx_queue->page_ptr_mask = page_ring_size - 1; } static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index cc2d907c4c4b..23a336c5096e 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -392,7 +392,7 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev, /* get MAC address from EEPROM */ for (i = 0; i < 3; i++) addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); - eth_hw_addr_set(net_dev, (u8 *)addr); + eth_hw_addr_set(net_dev, (u8 *)addr); rc = 1; break; diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 89381f796985..dd6f69ced4ee 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev) ndev->dma = (unsigned char)-1; ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) { + ret = ndev->irq; + goto release_both; + } + lp = netdev_priv(ndev); lp->netdev = ndev; #ifdef SMC_DYNAMIC_BUS_CONFIG diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index 0ce403fa5f1a..af661c65ffe2 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -856,6 +856,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) word configuration_register; word memory_info_register; word memory_cfg_register; + u8 addr[ETH_ALEN]; /* Grab the region so that no one 
else tries to probe our ioports. */ if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME)) @@ -924,9 +925,10 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) word address; address = inw( ioaddr + ADDR0 + i ); - dev->dev_addr[ i + 1] = address >> 8; - dev->dev_addr[ i ] = address & 0xFF; + addr[i + 1] = address >> 8; + addr[i] = address & 0xFF; } + eth_hw_addr_set(dev, addr); /* get the memory information */ diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index de7d8bf2c226..556bd353dd42 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -933,7 +933,7 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, } break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(priv->ndev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->ndev, prog, act); @@ -1977,11 +1977,12 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) static int netsec_probe(struct platform_device *pdev) { - struct resource *mmio_res, *eeprom_res, *irq_res; + struct resource *mmio_res, *eeprom_res; struct netsec_priv *priv; u32 hw_ver, phy_addr = 0; struct net_device *ndev; int ret; + int irq; mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mmio_res) { @@ -1995,11 +1996,9 @@ static int netsec_probe(struct platform_device *pdev) return -ENODEV; } - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq_res) { - dev_err(&pdev->dev, "No IRQ resource found.\n"); - return -ENODEV; - } + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; ndev = alloc_etherdev(sizeof(*priv)); if (!ndev) @@ -2010,7 +2009,7 @@ static int netsec_probe(struct platform_device *pdev) spin_lock_init(&priv->reglock); SET_NETDEV_DEV(ndev, &pdev->dev); platform_set_drvdata(pdev, priv); - ndev->irq = irq_res->start; + ndev->irq = irq; priv->dev = &pdev->dev; priv->ndev = ndev; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 9160f9ed363a..6b5d96bced47 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -317,6 +317,7 @@ enum tx_frame_status { tx_not_ls = 0x1, tx_err = 0x2, tx_dma_own = 0x4, + tx_err_bump_tc = 0x8, }; enum dma_irq_status { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 5c74b6279d69..2ffa0a11eea5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -113,8 +113,10 @@ static void rgmii_updatel(struct qcom_ethqos *ethqos, rgmii_writel(ethqos, temp, offset); } -static void rgmii_dump(struct qcom_ethqos *ethqos) +static void rgmii_dump(void *priv) { + struct qcom_ethqos *ethqos = priv; + dev_dbg(&ethqos->pdev->dev, "Rgmii register dump\n"); dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n", rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG)); @@ -447,6 +449,24 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed) ethqos_configure(ethqos); } +static int ethqos_clks_config(void *priv, bool enabled) +{ + struct qcom_ethqos *ethqos = priv; + int ret = 0; + + if (enabled) { + ret = clk_prepare_enable(ethqos->rgmii_clk); + if (ret) { + dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n"); + return ret; + } + } else { + clk_disable_unprepare(ethqos->rgmii_clk); + } + + return ret; +} + static int qcom_ethqos_probe(struct 
platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -466,6 +486,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); } + plat_dat->clks_config = ethqos_clks_config; + ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL); if (!ethqos) { ret = -ENOMEM; @@ -489,7 +511,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) goto err_mem; } - ret = clk_prepare_enable(ethqos->rgmii_clk); + ret = ethqos_clks_config(ethqos, true); if (ret) goto err_mem; @@ -499,6 +521,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->bsp_priv = ethqos; plat_dat->fix_mac_speed = ethqos_fix_mac_speed; + plat_dat->dump_debug_regs = rgmii_dump; plat_dat->has_gmac4 = 1; plat_dat->pmt = 1; plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); @@ -507,12 +530,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (ret) goto err_clk; - rgmii_dump(ethqos); - return ret; err_clk: - clk_disable_unprepare(ethqos->rgmii_clk); + ethqos_clks_config(ethqos, false); err_mem: stmmac_remove_config_dt(pdev, plat_dat); @@ -530,7 +551,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev) return -ENODEV; ret = stmmac_pltfr_remove(pdev); - clk_disable_unprepare(ethqos->rgmii_clk); + ethqos_clks_config(ethqos, false); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 6924a6aacbd5..c469abc91fa1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -33,6 +33,7 @@ struct rk_gmac_ops { void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed); void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed); void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv); + bool regs_valid; u32 regs[]; }; @@ -1092,6 +1093,7 @@ static const struct rk_gmac_ops rk3568_ops = { .set_to_rmii = rk3568_set_to_rmii, .set_rgmii_speed = rk3568_set_gmac_speed, .set_rmii_speed = rk3568_set_gmac_speed, + .regs_valid = true, .regs = { 0xfe2a0000, /* gmac0 */ 0xfe010000, /* gmac1 */ @@ -1383,7 +1385,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, * to be distinguished. 
*/ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) { + if (res && ops->regs_valid) { int i = 0; while (ops->regs[i]) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 85208128f135..b7c2579c963b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend, - socfpga_dwmac_resume); +static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + stmmac_bus_clks_config(priv, false); + + return 0; +} + +static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return stmmac_bus_clks_config(priv, true); +} + +static const struct dev_pm_ops socfpga_dwmac_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume) + SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL) +}; static const struct socfpga_dwmac_ops socfpga_gen5_ops = { .set_phy_mode = socfpga_gen5_set_phy_mode, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index 66fc8be34bb7..e2e0f977875d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -26,7 +26,7 @@ #define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8)) #define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9) #define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8) -#define ETHER_CLK_SEL_FREQ_SEL_2P5M BIT(0) +#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0) #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10) #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index cbf4429fb1d2..d3b4765c1a5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -32,6 +32,8 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, return tx_not_ls; if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) { + ret = tx_err; + if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT)) x->tx_jabber++; if (unlikely(tdes3 & TDES3_PACKET_FLUSHED)) @@ -53,16 +55,16 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL)) x->tx_deferred++; - if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) + if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) { x->tx_underflow++; + ret |= tx_err_bump_tc; + } if (unlikely(tdes3 & TDES3_IP_HDR_ERROR)) x->tx_ip_header_error++; if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR)) x->tx_payload_error++; - - ret = tx_err; } if (unlikely(tdes3 & TDES3_DEFERRED)) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 43eead726886..40b5ed94cb54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -10,7 +10,6 @@ #define __STMMAC_H__ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "Jan_2016" #include <linux/clk.h> #include <linux/hrtimer.h> @@ -23,6 +22,7 @@ #include 
<linux/net_tstamp.h> #include <linux/reset.h> #include <net/page_pool.h> +#include <uapi/linux/bpf.h> struct stmmac_resources { void __iomem *addr; @@ -172,6 +172,22 @@ struct stmmac_flow_entry { int is_l4; }; +/* Rx Frame Steering */ +enum stmmac_rfs_type { + STMMAC_RFS_T_VLAN, + STMMAC_RFS_T_LLDP, + STMMAC_RFS_T_1588, + STMMAC_RFS_T_MAX, +}; + +struct stmmac_rfs_entry { + unsigned long cookie; + u16 etype; + int in_use; + int type; + int tc; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; @@ -289,6 +305,10 @@ struct stmmac_priv { struct stmmac_tc_entry *tc_entries; unsigned int flow_entries_max; struct stmmac_flow_entry *flow_entries; + unsigned int rfs_entries_max[STMMAC_RFS_T_MAX]; + unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX]; + unsigned int rfs_entries_total; + struct stmmac_rfs_entry *rfs_entries; /* Pulse Per Second output */ struct stmmac_pps_cfg pps[STMMAC_PPS_MAX]; @@ -314,10 +334,11 @@ int stmmac_mdio_reset(struct mii_bus *mii); int stmmac_xpcs_setup(struct mii_bus *mii); void stmmac_set_ethtool_ops(struct net_device *netdev); +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); -int stmmac_open(struct net_device *dev); -int stmmac_release(struct net_device *dev); +int stmmac_xdp_open(struct net_device *dev); +void stmmac_xdp_release(struct net_device *dev); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); int stmmac_dvr_remove(struct device *dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d89455803bed..164dff5ec32e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -290,7 +290,6 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev, strlcpy(info->bus_info, pci_name(priv->plat->pdev), sizeof(info->bus_info)); } - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, @@ -463,7 +462,9 @@ static int stmmac_nway_reset(struct net_device *dev) } static void stmmac_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct stmmac_priv *priv = netdev_priv(netdev); @@ -474,7 +475,9 @@ static void stmmac_get_ringparam(struct net_device *netdev, } static int stmmac_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending < DMA_MIN_RX_SIZE || diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d3f350c25b9b..63ff2dad8c85 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -50,6 +50,13 @@ #include "dwxgmac2.h" #include "hwif.h" +/* As long as the interface is active, we keep the timestamping counter enabled + * with fine resolution and binary rollover. This avoids non-monotonic behavior + * (clock jumps) when changing timestamping settings at runtime. 
+ */ +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSCTRLSSR) + #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) @@ -125,6 +132,8 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, + u32 rxmode, u32 chan); #ifdef CONFIG_DEBUG_FS static const struct net_device_ops stmmac_netdev_ops; @@ -524,7 +533,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, { struct skb_shared_hwtstamps shhwtstamp; bool found = false; - s64 adjust = 0; u64 ns = 0; if (!priv->hwts_tx_en) @@ -543,12 +551,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, } if (found) { - /* Correct the clk domain crossing(CDC) error */ - if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { - adjust += -(2 * (NSEC_PER_SEC / - priv->plat->clk_ptp_rate)); - ns += adjust; - } + ns -= priv->plat->cdc_error_adj; memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -573,7 +576,6 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; - u64 adjust = 0; u64 ns = 0; if (!priv->hwts_rx_en) @@ -586,11 +588,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); - /* Correct the clk domain crossing(CDC) error */ - if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { - adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate); - ns -= adjust; - } + ns -= priv->plat->cdc_error_adj; netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); @@ -616,8 +614,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct stmmac_priv *priv = netdev_priv(dev); struct hwtstamp_config config; - struct timespec64 now; - u64 temp = 0; u32 ptp_v2 = 0; u32 tstamp_all = 0; u32 ptp_over_ipv4_udp = 0; @@ -626,11 +622,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) u32 snap_type_sel = 0; u32 ts_master_en = 0; u32 ts_event_en = 0; - u32 sec_inc = 0; - u32 value = 0; - bool xmac; - - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { netdev_alert(priv->dev, "No support for HW time stamping\n"); @@ -647,10 +638,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", __func__, config.flags, config.tx_type, config.rx_filter); - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -792,42 +779,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
0 : 1); priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; - if (!priv->hwts_tx_en && !priv->hwts_rx_en) - stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); - else { - value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | - tstamp_all | ptp_v2 | ptp_over_ethernet | - ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | - ts_master_en | snap_type_sel); - stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); - - /* program Sub Second Increment reg */ - stmmac_config_sub_second_increment(priv, - priv->ptpaddr, priv->plat->clk_ptp_rate, - xmac, &sec_inc); - temp = div_u64(1000000000ULL, sec_inc); - - /* Store sub second increment and flags for later use */ - priv->sub_second_inc = sec_inc; - priv->systime_flags = value; - - /* calculate default added value: - * formula is : - * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = 1e9ns/sec_inc - */ - temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); - - /* initialize system time */ - ktime_get_real_ts64(&now); + priv->systime_flags = STMMAC_HWTS_ACTIVE; - /* lower 32 bits of tv_sec are safe until y2106 */ - stmmac_init_systime(priv, priv->ptpaddr, - (u32)now.tv_sec, now.tv_nsec); + if (priv->hwts_tx_en || priv->hwts_rx_en) { + priv->systime_flags |= tstamp_all | ptp_v2 + + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel; } + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + memcpy(&priv->tstamp_config, &config, sizeof(config)); return copy_to_user(ifr->ifr_data, &config, @@ -856,6 +818,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) } /** + * stmmac_init_tstamp_counter - init hardware timestamping counter + * @priv: driver private structure + * @systime_flags: timestamping flags + * Description: + * Initialize hardware counter for packet timestamping. + * This is valid as long as the interface is open and not suspended. + * It will be rerun after resuming from suspend, in which case the timestamping + * flags updated by stmmac_hwtstamp_set() also need to be restored. 
+ */ +int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) +{ + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + struct timespec64 now; + u32 sec_inc = 0; + u64 temp = 0; + int ret; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) { + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + return ret; + } + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); + priv->systime_flags = systime_flags; + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); + +/** * stmmac_init_ptp - init PTP * @priv: driver private structure * Description: this is to verify if the HW supports the PTPv1 or PTPv2. @@ -865,9 +887,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) static int stmmac_init_ptp(struct stmmac_priv *priv) { bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + int ret; - if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) - return -EOPNOTSUPP; + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + if (ret) + return ret; priv->adv_ts = 0; /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ @@ -1427,16 +1451,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + if (priv->dma_cap.addr64 <= 32) + gfp |= GFP_DMA32; if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->page) return -ENOMEM; buf->page_offset = stmmac_rx_offset(priv); } if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->sec_page) return -ENOMEM; @@ -2356,7 +2384,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) bool work_done = true; /* Avoids TX time-out as we are sharing with slow path */ - nq->trans_start = jiffies; + txq_trans_cond_update(nq); budget = min(budget, stmmac_tx_avail(priv, queue)); @@ -2440,6 +2468,21 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) return !!budget && work_done; } +static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) +{ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { + tc += 64; + + if (priv->plat->force_thresh_dma_mode) + stmmac_set_dma_operation_mode(priv, tc, tc, chan); + else + stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, + chan); + + priv->xstats.threshold = tc; + } 
+} + /** * stmmac_tx_clean - to manage the transmission completion * @priv: driver private structure @@ -2505,6 +2548,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) /* ... verify the status error condition */ if (unlikely(status & tx_err)) { priv->dev->stats.tx_errors++; + if (unlikely(status & tx_err_bump_tc)) + stmmac_bump_dma_threshold(priv, queue); } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; @@ -2755,21 +2800,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && - (tc <= 256)) { - tc += 64; - if (priv->plat->force_thresh_dma_mode) - stmmac_set_dma_operation_mode(priv, - tc, - tc, - chan); - else - stmmac_set_dma_operation_mode(priv, - tc, - SF_DMA_MODE, - chan); - priv->xstats.threshold = tc; - } + stmmac_bump_dma_threshold(priv, chan); } else if (unlikely(status[chan] == tx_hard_error)) { stmmac_tx_err(priv, chan); } @@ -3275,10 +3306,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_mmc_setup(priv); if (init_ptp) { - ret = clk_prepare_enable(priv->plat->clk_ptp_ref); - if (ret < 0) - netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); - ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_warn(priv->dev, "PTP not supported by HW\n"); @@ -3643,7 +3670,7 @@ static int stmmac_request_irq(struct net_device *dev) * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ -int stmmac_open(struct net_device *dev) +static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); int mode = priv->plat->phy_interface; @@ -3767,11 +3794,13 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) * Description: * This is the stop entry point of the driver. 
*/ -int stmmac_release(struct net_device *dev) +static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); u32 chan; + netif_tx_disable(dev); + if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ @@ -4450,6 +4479,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; int dirty = stmmac_rx_dirty(priv, queue); unsigned int entry = rx_q->dirty_rx; + gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); + + if (priv->dma_cap.addr64 <= 32) + gfp |= GFP_DMA32; while (dirty-- > 0) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; @@ -4462,13 +4495,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) p = rx_q->dma_rx + entry; if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->page) break; } if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->sec_page) break; @@ -4657,7 +4690,7 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, __netif_tx_lock(nq, cpu); /* Avoids TX time-out as we are sharing with slow path */ - nq->trans_start = jiffies; + txq_trans_cond_update(nq); res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); if (res == STMMAC_XDP_TX) @@ -4690,7 +4723,7 @@ static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, res = STMMAC_XDP_REDIRECT; break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(priv->dev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->dev, prog, act); @@ -5164,12 +5197,13 @@ read_again: if (likely(!(status & rx_not_ls)) && (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || unlikely(status != llc_snap))) { - if (buf2_len) + if (buf2_len) { buf2_len -= ETH_FCS_LEN; - else + len -= ETH_FCS_LEN; + } else if (buf1_len) { buf1_len -= ETH_FCS_LEN; - - len -= ETH_FCS_LEN; + len -= ETH_FCS_LEN; + } } if (!skb) { @@ -5507,8 +5541,6 @@ static int stmmac_set_features(struct net_device *netdev, netdev_features_t features) { struct stmmac_priv *priv = netdev_priv(netdev); - bool sph_en; - u32 chan; /* Keep the COE Type in case of csum is supporting */ if (features & NETIF_F_RXCSUM) @@ -5520,10 +5552,13 @@ static int stmmac_set_features(struct net_device *netdev, */ stmmac_rx_ipc(priv, priv->hw); - sph_en = (priv->hw->rx_csum > 0) && priv->sph; + if (priv->sph_cap) { + bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; + u32 chan; - for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) - stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } return 0; } @@ -5719,21 +5754,7 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) if (unlikely(status & tx_hard_error_bump_tc)) { /* Try to bump up the dma threshold on this failure */ - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && - tc <= 256) { - tc += 64; - if (priv->plat->force_thresh_dma_mode) - stmmac_set_dma_operation_mode(priv, - tc, - tc, - chan); - else - stmmac_set_dma_operation_mode(priv, - tc, - SF_DMA_MODE, - chan); - priv->xstats.threshold = tc; - } + stmmac_bump_dma_threshold(priv, chan); } else if (unlikely(status == tx_hard_error)) { stmmac_tx_err(priv, chan); } @@ -6293,7 +6314,7 @@ static int stmmac_xdp_xmit(struct net_device 
*dev, int num_frames, __netif_tx_lock(nq, cpu); /* Avoids TX time-out as we are sharing with slow path */ - nq->trans_start = jiffies; + txq_trans_cond_update(nq); for (i = 0; i < num_frames; i++) { int res; @@ -6429,6 +6450,140 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) spin_unlock_irqrestore(&ch->lock, flags); } +void stmmac_xdp_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; + + /* Disable NAPI process */ + stmmac_disable_all_queues(priv); + + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + hrtimer_cancel(&priv->tx_queue[chan].txtimer); + + /* Free the IRQ lines */ + stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); + + /* Stop TX/RX DMA channels */ + stmmac_stop_all_dma(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, false); + + /* set trans_start so we don't get spurious + * watchdogs during reset + */ + netif_trans_update(dev); + netif_carrier_off(dev); +} + +int stmmac_xdp_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 rx_cnt = priv->plat->rx_queues_to_use; + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_cnt, tx_cnt); + struct stmmac_rx_queue *rx_q; + struct stmmac_tx_queue *tx_q; + u32 buf_size; + bool sph_en; + u32 chan; + int ret; + + ret = alloc_dma_desc_resources(priv); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto dma_desc_error; + } + + ret = init_dma_desc_rings(dev, GFP_KERNEL); + if (ret < 0) { + netdev_err(dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } + + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); + + /* Adjust Split header */ + sph_en = (priv->hw->rx_csum > 0) && priv->sph; + + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_cnt; chan++) { + rx_q = &priv->rx_queue[chan]; + + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); + + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (rx_q->buf_alloc_num * + sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + + if (rx_q->xsk_pool && rx_q->buf_alloc_num) { + buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); + stmmac_set_dma_bfsize(priv, priv->ioaddr, + buf_size, + rx_q->queue_index); + } else { + stmmac_set_dma_bfsize(priv, priv->ioaddr, + priv->dma_buf_sz, + rx_q->queue_index); + } + + stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); + } + + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_cnt; chan++) { + tx_q = &priv->tx_queue[chan]; + + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy; + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); + + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx_q->txtimer.function = stmmac_tx_timer; + } + + /* Enable the MAC Rx/Tx */ + stmmac_mac_set(priv, priv->ioaddr, true); + + /* Start Rx & Tx DMA Channels */ + stmmac_start_all_dma(priv); + + ret = stmmac_request_irq(dev); + if (ret) + goto irq_error; + + /* Enable NAPI process*/ + stmmac_enable_all_queues(priv); + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + + return 0; + +irq_error: + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + 
hrtimer_cancel(&priv->tx_queue[chan].txtimer); + + stmmac_hw_teardown(dev); +init_error: + free_dma_desc_resources(priv); +dma_desc_error: + return ret; +} + int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) { struct stmmac_priv *priv = netdev_priv(dev); @@ -7052,6 +7207,9 @@ int stmmac_dvr_probe(struct device *device, stmmac_init_fs(ndev); #endif + if (priv->plat->dump_debug_regs) + priv->plat->dump_debug_regs(priv->plat->bsp_priv); + /* Let pm_runtime_put() disable the clocks. * If CONFIG_PM is not enabled, the clocks will stay powered. */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 232ac98943cd..5d29f336315b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (ret) return ret; - clk_prepare_enable(priv->plat->clk_ptp_ref); + stmmac_init_tstamp_counter(priv, priv->systime_flags); } return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 580cc035536b..0d24ebd37873 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) time.tv_nsec = priv->plat->est->btr_reserve[0]; time.tv_sec = priv->plat->est->btr_reserve[1]; basetime = timespec64_to_ktime(time); - cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC + + cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC + priv->plat->est->ctr[0]; time = stmmac_calc_tas_basetime(basetime, current_time_ns, @@ -309,6 +309,11 @@ void stmmac_ptp_register(struct stmmac_priv *priv) if (priv->plat->ptp_max_adj) stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; + /* Calculate the clock domain crossing (CDC) error if necessary */ + priv->plat->cdc_error_adj = 0; + if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) + priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 1c4ea0b1b845..d61766eeac6d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -232,11 +232,35 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv, } } +static int tc_rfs_init(struct stmmac_priv *priv) +{ + int i; + + priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8; + priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1; + priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1; + + for (i = 0; i < STMMAC_RFS_T_MAX; i++) + priv->rfs_entries_total += priv->rfs_entries_max[i]; + + priv->rfs_entries = devm_kcalloc(priv->device, + priv->rfs_entries_total, + sizeof(*priv->rfs_entries), + GFP_KERNEL); + if (!priv->rfs_entries) + return -ENOMEM; + + dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n", + priv->rfs_entries_total); + + return 0; +} + static int tc_init(struct stmmac_priv *priv) { struct dma_features *dma_cap = &priv->dma_cap; unsigned int count; - int i; + int ret, i; if (dma_cap->l3l4fnum) { priv->flow_entries_max = dma_cap->l3l4fnum; @@ -250,10 +274,14 @@ static int tc_init(struct stmmac_priv *priv) for (i = 0; i < priv->flow_entries_max; i++) priv->flow_entries[i].idx = i; 
- dev_info(priv->device, "Enabled Flow TC (entries=%d)\n", + dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n", priv->flow_entries_max); } + ret = tc_rfs_init(priv); + if (ret) + return -ENOMEM; + if (!priv->plat->fpe_cfg) { priv->plat->fpe_cfg = devm_kzalloc(priv->device, sizeof(*priv->plat->fpe_cfg), @@ -425,6 +453,8 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv, return 0; } +#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0) + static int tc_add_basic_flow(struct stmmac_priv *priv, struct flow_cls_offload *cls, struct stmmac_flow_entry *entry) @@ -438,6 +468,7 @@ static int tc_add_basic_flow(struct stmmac_priv *priv, return -EINVAL; flow_rule_match_basic(rule, &match); + entry->ip_proto = match.key->ip_proto; return 0; } @@ -607,16 +638,45 @@ static int tc_del_flow(struct stmmac_priv *priv, return ret; } +static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv, + struct flow_cls_offload *cls, + bool get_free) +{ + int i; + + for (i = 0; i < priv->rfs_entries_total; i++) { + struct stmmac_rfs_entry *entry = &priv->rfs_entries[i]; + + if (entry->cookie == cls->cookie) + return entry; + if (get_free && entry->in_use == false) + return entry; + } + + return NULL; +} + #define VLAN_PRIO_FULL_MASK (0x07) static int tc_add_vlan_flow(struct stmmac_priv *priv, struct flow_cls_offload *cls) { + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_dissector *dissector = rule->match.dissector; int tc = tc_classid_to_hwtc(priv->dev, cls->classid); struct flow_match_vlan match; + if (!entry) { + entry = tc_find_rfs(priv, cls, true); + if (!entry) + return -ENOENT; + } + + if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >= + priv->rfs_entries_max[STMMAC_RFS_T_VLAN]) + return -ENOENT; + /* Nothing to do here */ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) return -EINVAL; @@ -638,6 +698,12 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv, prio = BIT(match.key->vlan_priority); stmmac_rx_queue_prio(priv, priv->hw, prio, tc); + + entry->in_use = true; + entry->cookie = cls->cookie; + entry->tc = tc; + entry->type = STMMAC_RFS_T_VLAN; + priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++; } return 0; @@ -646,12 +712,40 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv, static int tc_del_vlan_flow(struct stmmac_priv *priv, struct flow_cls_offload *cls) { + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + + if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN) + return -ENOENT; + + stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc); + + entry->in_use = false; + entry->cookie = 0; + entry->tc = 0; + entry->type = 0; + + priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--; + + return 0; +} + +static int tc_add_ethtype_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_dissector *dissector = rule->match.dissector; int tc = tc_classid_to_hwtc(priv->dev, cls->classid); + struct flow_match_basic match; + + if (!entry) { + entry = tc_find_rfs(priv, cls, true); + if (!entry) + return -ENOENT; + } /* Nothing to do here */ - if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) + if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) return -EINVAL; if (tc < 0) { @@ -659,7 +753,86 @@ static int tc_del_vlan_flow(struct stmmac_priv *priv, return -EINVAL; } - stmmac_rx_queue_prio(priv, priv->hw, 0, tc); + 
flow_rule_match_basic(rule, &match); + + if (match.mask->n_proto) { + u16 etype = ntohs(match.key->n_proto); + + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { + netdev_err(priv->dev, "Only full mask is supported for EthType filter"); + return -EINVAL; + } + switch (etype) { + case ETH_P_LLDP: + if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >= + priv->rfs_entries_max[STMMAC_RFS_T_LLDP]) + return -ENOENT; + + entry->type = STMMAC_RFS_T_LLDP; + priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++; + + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_DCBCPQ, tc); + break; + case ETH_P_1588: + if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >= + priv->rfs_entries_max[STMMAC_RFS_T_1588]) + return -ENOENT; + + entry->type = STMMAC_RFS_T_1588; + priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++; + + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_PTPQ, tc); + break; + default: + netdev_err(priv->dev, "EthType(0x%x) is not supported", etype); + return -EINVAL; + } + + entry->in_use = true; + entry->cookie = cls->cookie; + entry->tc = tc; + entry->etype = etype; + + return 0; + } + + return -EINVAL; +} + +static int tc_del_ethtype_flow(struct stmmac_priv *priv, + struct flow_cls_offload *cls) +{ + struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false); + + if (!entry || !entry->in_use || + entry->type < STMMAC_RFS_T_LLDP || + entry->type > STMMAC_RFS_T_1588) + return -ENOENT; + + switch (entry->etype) { + case ETH_P_LLDP: + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_DCBCPQ, 0); + priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--; + break; + case ETH_P_1588: + stmmac_rx_queue_routing(priv, priv->hw, + PACKET_PTPQ, 0); + priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--; + break; + default: + netdev_err(priv->dev, "EthType(0x%x) is not supported", + entry->etype); + return -EINVAL; + } + + entry->in_use = false; + entry->cookie = 0; + entry->tc = 0; + entry->etype = 0; + entry->type = 0; return 0; } @@ -673,6 +846,10 @@ static int tc_add_flow_cls(struct stmmac_priv *priv, if (!ret) return ret; + ret = tc_add_ethtype_flow(priv, cls); + if (!ret) + return ret; + return tc_add_vlan_flow(priv, cls); } @@ -685,6 +862,10 @@ static int tc_del_flow_cls(struct stmmac_priv *priv, if (!ret) return ret; + ret = tc_del_ethtype_flow(priv, cls); + if (!ret) + return ret; + return tc_del_vlan_flow(priv, cls); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index 2a616c6f7cd0..9d4d8c3dad0a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -119,7 +119,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, need_update = !!priv->xdp_prog != !!prog; if (if_running && need_update) - stmmac_release(dev); + stmmac_xdp_release(dev); old_prog = xchg(&priv->xdp_prog, prog); if (old_prog) @@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); if (if_running && need_update) - stmmac_open(dev); + stmmac_xdp_open(dev); return 0; } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index d2d4f47c7e28..dba9f12efa1c 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4893,8 +4893,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) unsigned long casreg_len; struct net_device *dev; struct cas *cp; - int i, err, pci_using_dac; u16 pci_cmd; + int i, err; u8 orig_cacheline_size = 0, 
cas_cacheline_size = 0; if (cas_version_printed++ == 0) @@ -4965,23 +4965,10 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Configure DMA attributes. */ - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - pci_using_dac = 1; - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (err < 0) { - dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " - "for consistent allocations\n"); - goto err_out_free_res; - } - - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, " - "aborting\n"); - goto err_out_free_res; - } - pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; } casreg_len = pci_resource_len(pdev, 0); @@ -5087,8 +5074,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; - if (pci_using_dac) - dev->features |= NETIF_F_HIGHDMA; + dev->features |= NETIF_F_HIGHDMA; /* MTU range: 60 - varies or 9000 */ dev->min_mtu = CAS_MIN_MTU; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 0775a5542f2f..985073eba3bd 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1884,10 +1884,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *ndev; struct bdx_priv *priv; - int err, pci_using_dac, port; unsigned long pciaddr; u32 regionSize; struct pci_nic *nic; + int err, port; ENTER; @@ -1900,16 +1900,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) /* it triggers interrupt, dunno why. 
*/ goto err_pci; /* it's not a problem though */ - if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) && - !(err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) { - pci_using_dac = 1; - } else { - if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) || - (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) { - pr_err("No usable DMA configuration, aborting\n"); - goto err_dma; - } - pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + pr_err("No usable DMA configuration, aborting\n"); + goto err_dma; } err = pci_request_regions(pdev, BDX_DRV_NAME); @@ -1982,16 +1976,14 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* these fields are used for info purposes only * so we can have them same for all ports of the board */ ndev->if_port = port; - ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO - | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM - ; + ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM | + NETIF_F_HIGHDMA; + ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; - if (pci_using_dac) - ndev->features |= NETIF_F_HIGHDMA; - /************** priv ****************/ priv = nic->priv[port] = netdev_priv(ndev); @@ -2245,9 +2237,13 @@ static inline int bdx_tx_fifo_size_to_packets(int tx_size) * bdx_get_ringparam - report ring sizes * @netdev * @ring + * @kernel_ring + * @extack */ static void -bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct bdx_priv *priv = netdev_priv(netdev); @@ -2262,9 +2258,13 @@ bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) * bdx_set_ringparam - set ring sizes * @netdev * @ring + * @kernel_ring + * @extack */ static int -bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct bdx_priv *priv = netdev_priv(netdev); int rx_size = 0; diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index b05de9b61ad6..d45b6bb86f0b 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -453,8 +453,11 @@ static int am65_cpsw_set_channels(struct net_device *ndev, return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count); } -static void am65_cpsw_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) +static void +am65_cpsw_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index c092cb61416a..8251d7eb001b 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -345,7 +345,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev, netif_txq = netdev_get_tx_queue(ndev, txqueue); tx_chn = &common->tx_chns[txqueue]; - trans_start = 
netif_txq->trans_start; + trans_start = READ_ONCE(netif_txq->trans_start); netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n", txqueue, @@ -1844,13 +1844,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) if (ret < 0) { dev_err(dev, "%pOF error reading port_id %d\n", port_np, ret); - return ret; + goto of_node_put; } if (!port_id || port_id > common->port_num) { dev_err(dev, "%pOF has invalid port_id %u %s\n", port_np, port_id, port_np->name); - return -EINVAL; + ret = -EINVAL; + goto of_node_put; } port = am65_common_get_port(common, port_id); @@ -1866,8 +1867,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); - if (IS_ERR(port->slave.mac_sl)) - return PTR_ERR(port->slave.mac_sl); + if (IS_ERR(port->slave.mac_sl)) { + ret = PTR_ERR(port->slave.mac_sl); + goto of_node_put; + } port->disabled = !of_device_is_available(port_np); if (port->disabled) { @@ -1880,7 +1883,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) ret = PTR_ERR(port->slave.ifphy); dev_err(dev, "%pOF error retrieving port phy: %d\n", port_np, ret); - return ret; + goto of_node_put; } port->slave.mac_only = @@ -1889,10 +1892,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) /* get phy/link info */ if (of_phy_is_fixed_link(port_np)) { ret = of_phy_register_fixed_link(port_np); - if (ret) - return dev_err_probe(dev, ret, + if (ret) { + ret = dev_err_probe(dev, ret, "failed to register fixed-link phy %pOF\n", port_np); + goto of_node_put; + } port->slave.phy_node = of_node_get(port_np); } else { port->slave.phy_node = @@ -1902,14 +1907,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) if (!port->slave.phy_node) { dev_err(dev, "slave[%d] no phy found\n", port_id); - return -ENODEV; + ret = -ENODEV; + goto of_node_put; } ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) { dev_err(dev, "%pOF read phy-mode err %d\n", port_np, ret); - return ret; + goto of_node_put; } ret = of_get_mac_address(port_np, port->slave.mac_addr); @@ -1932,6 +1938,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) } return 0; + +of_node_put: + of_node_put(port_np); + of_node_put(node); + return ret; } static void am65_cpsw_pcpu_stats_free(void *data) diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 7449436fc87c..bef5e68dac31 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -817,7 +817,9 @@ static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue) } static void cpmac_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct cpmac_priv *priv = netdev_priv(dev); @@ -833,7 +835,9 @@ static void cpmac_get_ringparam(struct net_device *dev, } static int cpmac_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct cpmac_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 158c8d3793f4..aa42141be3c0 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c 
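The am65-cpsw-nuss hunks above convert every early return in am65_cpsw_nuss_init_slave_ports() into a goto: for_each_child_of_node() holds a reference on the current child, so returning from inside the loop leaks it, and the new label drops both the child and the parent node reference before propagating the error. A hedged sketch of the shape (do_port_setup() is a hypothetical stand-in for the per-port body):

#include <linux/of.h>

static int init_slave_ports_sketch(struct device_node *node)
{
	struct device_node *port_np;
	int ret = 0;

	for_each_child_of_node(node, port_np) {
		ret = do_port_setup(port_np);	/* hypothetical helper */
		if (ret)
			goto of_node_put;	/* don't return with refs held */
	}
	return 0;

of_node_put:
	of_node_put(port_np);	/* ref the iterator took on the child */
	of_node_put(node);	/* ref the caller took on the parent */
	return ret;
}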
@@ -658,7 +658,9 @@ err: } void cpsw_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; @@ -671,7 +673,9 @@ void cpsw_get_ringparam(struct net_device *ndev, } int cpsw_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); int descs_num, ret; diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index ecc2a6b7e28f..3537502e5e8b 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -626,10 +626,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -710,20 +706,26 @@ int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) struct cpsw_priv *priv = netdev_priv(dev); struct cpsw_common *cpsw = priv->cpsw; int slave_no = cpsw_slave_index(cpsw, priv); + struct phy_device *phy; if (!netif_running(dev)) return -EINVAL; - switch (cmd) { - case SIOCSHWTSTAMP: - return cpsw_hwtstamp_set(dev, req); - case SIOCGHWTSTAMP: - return cpsw_hwtstamp_get(dev, req); + phy = cpsw->slaves[slave_no].phy; + + if (!phy_has_hwtstamp(phy)) { + switch (cmd) { + case SIOCSHWTSTAMP: + return cpsw_hwtstamp_set(dev, req); + case SIOCGHWTSTAMP: + return cpsw_hwtstamp_get(dev, req); + } } - if (!cpsw->slaves[slave_no].phy) - return -EOPNOTSUPP; - return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); + if (phy) + return phy_mii_ioctl(phy, req, cmd); + + return -EOPNOTSUPP; } int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) @@ -1360,7 +1362,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, xdp_do_flush_map(); break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(ndev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(ndev, prog, act); diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 435668ee542d..74555970730c 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -6,6 +6,8 @@ #ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ #define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ +#include <uapi/linux/bpf.h> + #include "davinci_cpdma.h" #define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ @@ -491,9 +493,13 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata); int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata); int cpsw_nway_reset(struct net_device *ndev); void cpsw_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering); + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack); int cpsw_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering); + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack); int cpsw_set_channels_common(struct net_device *ndev, struct ethtool_channels *chs, cpdma_handler_fn rx_handler); diff --git 
a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index d55f06120ce7..31df3267a01a 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1454,23 +1454,33 @@ static int emac_dev_open(struct net_device *ndev) } /* Request IRQ */ - while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, - res_num))) { - for (irq_num = res->start; irq_num <= res->end; irq_num++) { - if (request_irq(irq_num, emac_irq, 0, ndev->name, - ndev)) { - dev_err(emac_dev, - "DaVinci EMAC: request_irq() failed\n"); - ret = -EBUSY; + if (dev_of_node(&priv->pdev->dev)) { + while ((ret = platform_get_irq_optional(priv->pdev, res_num)) != -ENXIO) { + if (ret < 0) + goto rollback; + ret = request_irq(ret, emac_irq, 0, ndev->name, ndev); + if (ret) { + dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n"); goto rollback; } + res_num++; } - res_num++; + } else { + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, res_num))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) { + ret = request_irq(irq_num, emac_irq, 0, ndev->name, ndev); + if (ret) { + dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n"); + goto rollback; + } + } + res_num++; + } + /* prepare counters for rollback in case of an error */ + res_num--; + irq_num--; } - /* prepare counters for rollback in case of an error */ - res_num--; - irq_num--; /* Start/Enable EMAC hardware */ emac_hw_enable(priv); @@ -1554,16 +1564,24 @@ err: napi_disable(&priv->napi); rollback: - for (q = res_num; q >= 0; q--) { - res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); - /* at the first iteration, irq_num is already set to the - * right value - */ - if (q != res_num) - irq_num = res->end; + if (dev_of_node(&priv->pdev->dev)) { + for (q = res_num - 1; q >= 0; q--) { + irq_num = platform_get_irq(priv->pdev, q); + if (irq_num > 0) + free_irq(irq_num, ndev); + } + } else { + for (q = res_num; q >= 0; q--) { + res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); + /* at the first iteration, irq_num is already set to the + * right value + */ + if (q != res_num) + irq_num = res->end; - for (m = irq_num; m >= res->start; m--) - free_irq(m, ndev); + for (m = irq_num; m >= res->start; m--) + free_irq(m, ndev); + } } cpdma_ctlr_stop(priv->dma); pm_runtime_put(&priv->pdev->dev); @@ -1899,13 +1917,10 @@ static int davinci_emac_probe(struct platform_device *pdev) goto err_free_txchan; } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(&pdev->dev, "error getting irq res\n"); - rc = -ENOENT; + rc = platform_get_irq(pdev, 0); + if (rc < 0) goto err_free_rxchan; - } - ndev->irq = res->start; + ndev->irq = rc; rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 
0 : 1, priv->mac_addr); if (!rc) diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 33c1592d5381..751fb0bc65c5 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2654,10 +2654,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - switch (cfg.tx_type) { case HWTSTAMP_TX_OFF: gbe_dev->tx_ts_enabled = 0; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index f50f9a43d3ea..f47b8358669d 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -595,24 +595,24 @@ spider_net_set_multi(struct net_device *netdev) int i; u32 reg; struct spider_net_card *card = netdev_priv(netdev); - DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {}; + DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES); spider_net_set_promisc(card); if (netdev->flags & IFF_ALLMULTI) { - for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) { - set_bit(i, bitmask); - } + bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES); goto write_hash; } + bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES); + /* well, we know, what the broadcast hash value is: it's xfd hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ - set_bit(0xfd, bitmask); + __set_bit(0xfd, bitmask); netdev_for_each_mc_addr(ha, netdev) { hash = spider_net_get_multicast_hash(netdev, ha->addr); - set_bit(hash, bitmask); + __set_bit(hash, bitmask); } write_hash: diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c index 54f655a68148..93110dba0bfa 100644 --- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c +++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c @@ -110,7 +110,9 @@ spider_net_ethtool_nway_reset(struct net_device *netdev) static void spider_net_ethtool_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct spider_net_card *card = netdev_priv(netdev); diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig new file mode 100644 index 000000000000..6e2cf062ddba --- /dev/null +++ b/drivers/net/ethernet/vertexcom/Kconfig @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Vertexcom network device configuration +# + +config NET_VENDOR_VERTEXCOM + bool "Vertexcom devices" + default n + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Vertexcom cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_VERTEXCOM + +config MSE102X + tristate "Vertexcom MSE102x SPI" + depends on SPI + help + SPI driver for Vertexcom MSE102x SPI attached network chip. + +endif # NET_VENDOR_VERTEXCOM diff --git a/drivers/net/ethernet/vertexcom/Makefile b/drivers/net/ethernet/vertexcom/Makefile new file mode 100644 index 000000000000..f8b12e312637 --- /dev/null +++ b/drivers/net/ethernet/vertexcom/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Vertexcom network device drivers. 
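In the spider_net_set_multi() hunk above, the open-coded loop and the atomic set_bit() calls become bitmap_fill()/bitmap_zero()/__set_bit(): the bitmap lives on the stack of a single caller, so the non-atomic variants are both sufficient and cheaper. A minimal sketch of the idiom (the hash count here is an assumption, chosen to match an 8-bit hash):

#include <linux/bitmap.h>

#define MCAST_HASHES 256	/* assumed equal to SPIDER_NET_MULTICAST_HASHES */

static void build_hash_mask_sketch(bool allmulti)
{
	DECLARE_BITMAP(bitmask, MCAST_HASHES);

	if (allmulti) {
		bitmap_fill(bitmask, MCAST_HASHES);	/* replaces the set_bit() loop */
		return;
	}

	bitmap_zero(bitmask, MCAST_HASHES);	/* explicit init instead of "= {}" */
	__set_bit(0xfd, bitmask);		/* broadcast hash, unshared stack data */
}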
+# + +obj-$(CONFIG_MSE102X) += mse102x.o diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c new file mode 100644 index 000000000000..89a31783fbb4 --- /dev/null +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -0,0 +1,769 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 in-tech smart charging GmbH + * + * driver is based on micrel/ks8851_spi.c + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/cache.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#include <linux/spi/spi.h> +#include <linux/of_net.h> + +#define MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER) + +#define DRV_NAME "mse102x" + +#define DET_CMD 0x0001 +#define DET_SOF 0x0002 +#define DET_DFT 0x55AA + +#define CMD_SHIFT 12 +#define CMD_RTS (0x1 << CMD_SHIFT) +#define CMD_CTR (0x2 << CMD_SHIFT) + +#define CMD_MASK GENMASK(15, CMD_SHIFT) +#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0) + +#define DET_CMD_LEN 4 +#define DET_SOF_LEN 2 +#define DET_DFT_LEN 2 + +#define MIN_FREQ_HZ 6000000 +#define MAX_FREQ_HZ 7142857 + +struct mse102x_stats { + u64 xfer_err; + u64 invalid_cmd; + u64 invalid_ctr; + u64 invalid_dft; + u64 invalid_len; + u64 invalid_rts; + u64 invalid_sof; + u64 tx_timeout; +}; + +static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = { + "SPI transfer errors", + "Invalid command", + "Invalid CTR", + "Invalid DFT", + "Invalid frame length", + "Invalid RTS", + "Invalid SOF", + "TX timeout", +}; + +struct mse102x_net { + struct net_device *ndev; + + u8 rxd[8]; + u8 txd[8]; + + u32 msg_enable ____cacheline_aligned; + + struct sk_buff_head txq; + struct mse102x_stats stats; +}; + +struct mse102x_net_spi { + struct mse102x_net mse102x; + struct mutex lock; /* Protect SPI frame transfer */ + struct work_struct tx_work; + struct spi_device *spidev; + struct spi_message spi_msg; + struct spi_transfer spi_xfer; + +#ifdef CONFIG_DEBUG_FS + struct dentry *device_root; +#endif +}; + +#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x) + +#ifdef CONFIG_DEBUG_FS + +static int mse102x_info_show(struct seq_file *s, void *what) +{ + struct mse102x_net_spi *mses = s->private; + + seq_printf(s, "TX ring size : %u\n", + skb_queue_len(&mses->mse102x.txq)); + + seq_printf(s, "IRQ : %d\n", + mses->spidev->irq); + + seq_printf(s, "SPI effective speed : %lu\n", + (unsigned long)mses->spi_xfer.effective_speed_hz); + seq_printf(s, "SPI mode : %x\n", + mses->spidev->mode); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(mse102x_info); + +static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses) +{ + mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev), + NULL); + + debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses, + &mse102x_info_fops); +} + +static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses) +{ + debugfs_remove_recursive(mses->device_root); +} + +#else /* CONFIG_DEBUG_FS */ + +static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses) +{ +} + +static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses) +{ +} + +#endif + +/* SPI register read/write calls. + * + * All these calls issue SPI transactions to access the chip's registers. 
They + * all require that the necessary lock is held to prevent accesses when the + * chip is busy transferring packet data. + */ + +static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + __be16 txb[2]; + int ret; + + txb[0] = cpu_to_be16(DET_CMD); + txb[1] = cpu_to_be16(cmd); + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = DET_CMD_LEN; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } +} + +static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + __be16 *txb = (__be16 *)mse->txd; + __be16 *cmd = (__be16 *)mse->rxd; + u8 *trx = mse->rxd; + int ret; + + txb[0] = 0; + txb[1] = 0; + + xfer->tx_buf = txb; + xfer->rx_buf = trx; + xfer->len = DET_CMD_LEN; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } else if (*cmd != cpu_to_be16(DET_CMD)) { + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, *cmd); + mse->stats.invalid_cmd++; + ret = -EIO; + } else { + memcpy(rxb, trx + 2, 2); + } + + return ret; +} + +static inline void mse102x_push_header(struct sk_buff *skb) +{ + __be16 *header = skb_push(skb, DET_SOF_LEN); + + *header = cpu_to_be16(DET_SOF); +} + +static inline void mse102x_put_footer(struct sk_buff *skb) +{ + __be16 *footer = skb_put(skb, DET_DFT_LEN); + + *footer = cpu_to_be16(DET_DFT); +} + +static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, + unsigned int pad) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + struct sk_buff *tskb; + int ret; + + netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n", + __func__, txp, txp->len, txp->data); + + if ((skb_headroom(txp) < DET_SOF_LEN) || + (skb_tailroom(txp) < DET_DFT_LEN + pad)) { + tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad, + GFP_KERNEL); + if (!tskb) + return -ENOMEM; + + dev_kfree_skb(txp); + txp = tskb; + } + + mse102x_push_header(txp); + + if (pad) + skb_put_zero(txp, pad); + + mse102x_put_footer(txp); + + xfer->tx_buf = txp->data; + xfer->rx_buf = NULL; + xfer->len = txp->len; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } + + return ret; +} + +static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, + unsigned int frame_len) +{ + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + struct spi_transfer *xfer = &mses->spi_xfer; + struct spi_message *msg = &mses->spi_msg; + __be16 *sof = (__be16 *)buff; + __be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len); + int ret; + + xfer->rx_buf = buff; + xfer->tx_buf = NULL; + xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN; + + ret = spi_sync(mses->spidev, msg); + if (ret < 0) { + netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", + __func__, ret); + mse->stats.xfer_err++; + } else if (*sof != cpu_to_be16(DET_SOF)) { + netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n", + __func__, *sof); + mse->stats.invalid_sof++; + ret = -EIO; + } else if 
(*dft != cpu_to_be16(DET_DFT)) { + netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n", + __func__, *dft); + mse->stats.invalid_dft++; + ret = -EIO; + } + + return ret; +} + +static void mse102x_dump_packet(const char *msg, int len, const char *data) +{ + printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len); + print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1, + data, len, true); +} + +static void mse102x_rx_pkt_spi(struct mse102x_net *mse) +{ + struct sk_buff *skb; + unsigned int rxalign; + unsigned int rxlen; + __be16 rx = 0; + u16 cmd_resp; + u8 *rxpkt; + int ret; + + mse102x_tx_cmd_spi(mse, CMD_CTR); + ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); + cmd_resp = be16_to_cpu(rx); + + if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) { + usleep_range(50, 100); + + mse102x_tx_cmd_spi(mse, CMD_CTR); + ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); + if (ret) + return; + + cmd_resp = be16_to_cpu(rx); + if ((cmd_resp & CMD_MASK) != CMD_RTS) { + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, cmd_resp); + mse->stats.invalid_rts++; + return; + } + + net_dbg_ratelimited("%s: Unexpected response to first CMD\n", + __func__); + } + + rxlen = cmd_resp & LEN_MASK; + if (!rxlen) { + net_dbg_ratelimited("%s: No frame length defined\n", __func__); + mse->stats.invalid_len++; + return; + } + + rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); + skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); + if (!skb) + return; + + /* 2 bytes Start of frame (before ethernet header) + * 2 bytes Data frame tail (after ethernet frame) + * They are copied, but ignored. + */ + rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN; + if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) { + mse->ndev->stats.rx_errors++; + dev_kfree_skb(skb); + return; + } + + if (netif_msg_pktdata(mse)) + mse102x_dump_packet(__func__, skb->len, skb->data); + + skb->protocol = eth_type_trans(skb, mse->ndev); + netif_rx_ni(skb); + + mse->ndev->stats.rx_packets++; + mse->ndev->stats.rx_bytes += rxlen; +} + +static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb, + unsigned long work_timeout) +{ + unsigned int pad = 0; + __be16 rx = 0; + u16 cmd_resp; + int ret; + bool first = true; + + if (txb->len < 60) + pad = 60 - txb->len; + + while (1) { + mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad)); + ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); + cmd_resp = be16_to_cpu(rx); + + if (!ret) { + /* ready to send frame ? */ + if (cmd_resp == CMD_CTR) + break; + + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, cmd_resp); + mse->stats.invalid_ctr++; + } + + /* It's not predictable how long / many retries it takes to + * send at least one packet, so TX timeouts are possible. + * That's the reason why the netdev watchdog is not used here. 
+ */ + if (time_after(jiffies, work_timeout)) + return -ETIMEDOUT; + + if (first) { + /* throttle at first issue */ + netif_stop_queue(mse->ndev); + /* fast retry */ + usleep_range(50, 100); + first = false; + } else { + msleep(20); + } + } + + ret = mse102x_tx_frame_spi(mse, txb, pad); + if (ret) + net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n", + __func__, ret); + + return ret; +} + +#define TX_QUEUE_MAX 10 + +static void mse102x_tx_work(struct work_struct *work) +{ + /* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */ + unsigned long work_timeout = jiffies + msecs_to_jiffies(1000); + struct mse102x_net_spi *mses; + struct mse102x_net *mse; + struct sk_buff *txb; + int ret = 0; + + mses = container_of(work, struct mse102x_net_spi, tx_work); + mse = &mses->mse102x; + + while ((txb = skb_dequeue(&mse->txq))) { + mutex_lock(&mses->lock); + ret = mse102x_tx_pkt_spi(mse, txb, work_timeout); + mutex_unlock(&mses->lock); + if (ret) { + mse->ndev->stats.tx_dropped++; + } else { + mse->ndev->stats.tx_bytes += txb->len; + mse->ndev->stats.tx_packets++; + } + + dev_kfree_skb(txb); + } + + if (ret == -ETIMEDOUT) { + if (netif_msg_timer(mse)) + netdev_err(mse->ndev, "tx work timeout\n"); + + mse->stats.tx_timeout++; + } + + netif_wake_queue(mse->ndev); +} + +static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + netif_dbg(mse, tx_queued, ndev, + "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); + + skb_queue_tail(&mse->txq, skb); + + if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX) + netif_stop_queue(ndev); + + schedule_work(&mses->tx_work); + + return NETDEV_TX_OK; +} + +static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np) +{ + struct net_device *ndev = mse->ndev; + int ret = of_get_ethdev_address(np, ndev); + + if (ret) { + eth_hw_addr_random(ndev); + netdev_err(ndev, "Using random MAC address: %pM\n", + ndev->dev_addr); + } +} + +/* Assumption: this is called for every incoming packet */ +static irqreturn_t mse102x_irq(int irq, void *_mse) +{ + struct mse102x_net *mse = _mse; + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + mutex_lock(&mses->lock); + mse102x_rx_pkt_spi(mse); + mutex_unlock(&mses->lock); + + return IRQ_HANDLED; +} + +static int mse102x_net_open(struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + int ret; + + ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, + ndev->name, mse); + if (ret < 0) { + netdev_err(ndev, "Failed to get irq: %d\n", ret); + return ret; + } + + netif_dbg(mse, ifup, ndev, "opening\n"); + + netif_start_queue(ndev); + + netif_carrier_on(ndev); + + netif_dbg(mse, ifup, ndev, "network device up\n"); + + return 0; +} + +static int mse102x_net_stop(struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + netif_info(mse, ifdown, ndev, "shutting down\n"); + + netif_carrier_off(mse->ndev); + + /* stop any outstanding work */ + flush_work(&mses->tx_work); + + netif_stop_queue(ndev); + + skb_queue_purge(&mse->txq); + + free_irq(ndev->irq, mse); + + return 0; +} + +static const struct net_device_ops mse102x_netdev_ops = { + .ndo_open = mse102x_net_open, + .ndo_stop = mse102x_net_stop, + .ndo_start_xmit = mse102x_start_xmit_spi, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/* ethtool support */ + 
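A pattern worth noting in the mse102x code above: ndo_start_xmit() never touches the SPI bus, because spi_sync() sleeps while .ndo_start_xmit runs in atomic (BH-disabled) context. Frames are queued and a work item drains them under the driver mutex instead; condensed from mse102x_start_xmit_spi() above:

static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *ndev)
{
	struct mse102x_net *mse = netdev_priv(ndev);
	struct mse102x_net_spi *mses = to_mse102x_spi(mse);

	skb_queue_tail(&mse->txq, skb);		/* no SPI I/O in atomic context */

	if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
		netif_stop_queue(ndev);		/* backpressure until the worker drains */

	schedule_work(&mses->tx_work);		/* mse102x_tx_work() does the spi_sync() */

	return NETDEV_TX_OK;
}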
+static void mse102x_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *di) +{ + strscpy(di->driver, DRV_NAME, sizeof(di->driver)); + strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info)); +} + +static u32 mse102x_get_msglevel(struct net_device *ndev) +{ + struct mse102x_net *mse = netdev_priv(ndev); + + return mse->msg_enable; +} + +static void mse102x_set_msglevel(struct net_device *ndev, u32 to) +{ + struct mse102x_net *mse = netdev_priv(ndev); + + mse->msg_enable = to; +} + +static void mse102x_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *estats, u64 *data) +{ + struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_stats *st = &mse->stats; + + memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64)); +} + +static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &mse102x_gstrings_stats, + sizeof(mse102x_gstrings_stats)); + break; + default: + WARN_ON(1); + break; + } +} + +static int mse102x_get_sset_count(struct net_device *ndev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(mse102x_gstrings_stats); + default: + return -EINVAL; + } +} + +static const struct ethtool_ops mse102x_ethtool_ops = { + .get_drvinfo = mse102x_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = mse102x_get_msglevel, + .set_msglevel = mse102x_set_msglevel, + .get_ethtool_stats = mse102x_get_ethtool_stats, + .get_strings = mse102x_get_strings, + .get_sset_count = mse102x_get_sset_count, +}; + +/* driver bus management functions */ + +#ifdef CONFIG_PM_SLEEP + +static int mse102x_suspend(struct device *dev) +{ + struct mse102x_net *mse = dev_get_drvdata(dev); + struct net_device *ndev = mse->ndev; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + mse102x_net_stop(ndev); + } + + return 0; +} + +static int mse102x_resume(struct device *dev) +{ + struct mse102x_net *mse = dev_get_drvdata(dev); + struct net_device *ndev = mse->ndev; + + if (netif_running(ndev)) { + mse102x_net_open(ndev); + netif_device_attach(ndev); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume); + +static int mse102x_probe_spi(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct mse102x_net_spi *mses; + struct net_device *ndev; + struct mse102x_net *mse; + int ret; + + spi->bits_per_word = 8; + spi->mode |= SPI_MODE_3; + /* enforce minimum speed to ensure device functionality */ + spi->master->min_speed_hz = MIN_FREQ_HZ; + + if (!spi->max_speed_hz) + spi->max_speed_hz = MAX_FREQ_HZ; + + if (spi->max_speed_hz < MIN_FREQ_HZ || + spi->max_speed_hz > MAX_FREQ_HZ) { + dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n", + MIN_FREQ_HZ, MAX_FREQ_HZ); + return -EINVAL; + } + + ret = spi_setup(spi); + if (ret < 0) { + dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret); + return ret; + } + + ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi)); + if (!ndev) + return -ENOMEM; + + ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4); + ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4); + ndev->priv_flags &= ~IFF_TX_SKB_SHARING; + ndev->tx_queue_len = 100; + + mse = netdev_priv(ndev); + mses = to_mse102x_spi(mse); + + mses->spidev = spi; + mutex_init(&mses->lock); + INIT_WORK(&mses->tx_work, mse102x_tx_work); + + /* initialise pre-made spi transfer messages */ + spi_message_init(&mses->spi_msg); + spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg); + 
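The probe code above pre-builds a single spi_message containing one spi_transfer and reuses it for every command and frame transfer; each call site only repoints tx_buf/rx_buf and sets len before spi_sync(). Because the transfer structure is shared, every use has to happen with mses->lock held, which is why the IRQ handler and the TX worker both take the mutex. A condensed sketch of one reuse cycle, matching the fields above:

/* once, in probe */
spi_message_init(&mses->spi_msg);
spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg);

/* per transaction, with mses->lock held */
mses->spi_xfer.tx_buf = txb;		/* command or frame buffer */
mses->spi_xfer.rx_buf = NULL;		/* or an rx buffer for reads */
mses->spi_xfer.len = DET_CMD_LEN;
ret = spi_sync(mses->spidev, &mses->spi_msg);	/* blocks until completion */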
+ ndev->irq = spi->irq; + mse->ndev = ndev; + + /* set the default message enable */ + mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT); + + skb_queue_head_init(&mse->txq); + + SET_NETDEV_DEV(ndev, dev); + + dev_set_drvdata(dev, mse); + + netif_carrier_off(mse->ndev); + ndev->netdev_ops = &mse102x_netdev_ops; + ndev->ethtool_ops = &mse102x_ethtool_ops; + + mse102x_init_mac(mse, dev->of_node); + + ret = register_netdev(ndev); + if (ret) { + dev_err(dev, "failed to register network device: %d\n", ret); + return ret; + } + + mse102x_init_device_debugfs(mses); + + return 0; +} + +static int mse102x_remove_spi(struct spi_device *spi) +{ + struct mse102x_net *mse = dev_get_drvdata(&spi->dev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); + + if (netif_msg_drv(mse)) + dev_info(&spi->dev, "remove\n"); + + mse102x_remove_device_debugfs(mses); + unregister_netdev(mse->ndev); + + return 0; +} + +static const struct of_device_id mse102x_match_table[] = { + { .compatible = "vertexcom,mse1021" }, + { .compatible = "vertexcom,mse1022" }, + { } +}; +MODULE_DEVICE_TABLE(of, mse102x_match_table); + +static struct spi_driver mse102x_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = mse102x_match_table, + .pm = &mse102x_pm_ops, + }, + .probe = mse102x_probe_spi, + .remove = mse102x_remove_spi, +}; +module_spi_driver(mse102x_driver); + +MODULE_DESCRIPTION("MSE102x Network driver"); +MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index e7065c9a8e38..b900ab5aef2a 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1276,8 +1276,11 @@ static const struct attribute_group temac_attr_group = { * ethtool support */ -static void ll_temac_ethtools_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) +static void +ll_temac_ethtools_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); @@ -1291,8 +1294,11 @@ static void ll_temac_ethtools_get_ringparam(struct net_device *ndev, ering->tx_pending = lp->tx_bd_num; } -static int ll_temac_ethtools_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) +static int +ll_temac_ethtools_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 9b068b81ae09..23ac353b35fe 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1323,8 +1323,11 @@ static void axienet_ethtools_get_regs(struct net_device *ndev, data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); } -static void axienet_ethtools_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) +static void +axienet_ethtools_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); @@ -1338,8 +1341,11 @@ static void axienet_ethtools_get_ringparam(struct net_device *ndev, ering->tx_pending = lp->tx_bd_num; 
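The ll_temac/axienet hunks here, like the tehuti, am65-cpsw, cpmac, cpsw, spider_net and netvsc ones earlier in this diff, are one mechanical API change: the ethtool get/set_ringparam operations gained a kernel_ethtool_ringparam argument (for ring fields that never existed in the legacy UAPI struct) and a netlink extack for error reporting. Drivers that need neither simply ignore the new parameters. Sketch of the updated signature (the foo_* names are placeholders):

static void foo_get_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(ndev);	/* hypothetical driver */

	ering->rx_max_pending = priv->rx_ring_max;
	ering->rx_pending = priv->rx_ring_len;
	/* kernel_ering and extack may be left untouched */
}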
} -static int axienet_ethtools_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) +static int +axienet_ethtools_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); @@ -1503,65 +1509,6 @@ static const struct ethtool_ops axienet_ethtool_ops = { .nway_reset = axienet_ethtools_nway_reset, }; -static void axienet_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct net_device *ndev = to_net_dev(config->dev); - struct axienet_local *lp = netdev_priv(ndev); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - /* Only support the mode we are configured for */ - switch (state->interface) { - case PHY_INTERFACE_MODE_NA: - break; - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_SGMII: - if (lp->switch_x_sgmii) - break; - fallthrough; - default: - if (state->interface != lp->phy_mode) { - netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n", - phy_modes(state->interface), - phy_modes(lp->phy_mode)); - linkmode_zero(supported); - return; - } - } - - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - - phylink_set(mask, Asym_Pause); - phylink_set(mask, Pause); - - switch (state->interface) { - case PHY_INTERFACE_MODE_NA: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_GMII: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - phylink_set(mask, 1000baseX_Full); - phylink_set(mask, 1000baseT_Full); - if (state->interface == PHY_INTERFACE_MODE_1000BASEX) - break; - fallthrough; - case PHY_INTERFACE_MODE_MII: - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 10baseT_Full); - fallthrough; - default: - break; - } - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); -} - static void axienet_mac_pcs_get_state(struct phylink_config *config, struct phylink_link_state *state) { @@ -1687,7 +1634,7 @@ static void axienet_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops axienet_phylink_ops = { - .validate = axienet_validate, + .validate = phylink_generic_validate, .mac_pcs_get_state = axienet_mac_pcs_get_state, .mac_an_restart = axienet_mac_an_restart, .mac_prepare = axienet_mac_prepare, @@ -2104,6 +2051,17 @@ static int axienet_probe(struct platform_device *pdev) lp->phylink_config.dev = &ndev->dev; lp->phylink_config.type = PHYLINK_NETDEV; + lp->phylink_config.legacy_pre_march2020 = true; + lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | + MAC_10FD | MAC_100FD | MAC_1000FD; + + __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); + if (lp->switch_x_sgmii) { + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + lp->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, + lp->phylink_config.supported_interfaces); + } lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, lp->phy_mode, diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 0815de581c7f..519599480b15 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1133,14 +1133,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev) lp->ndev = ndev; /* Get IRQ for the device */ 
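The axienet conversion above is the standard recipe for retiring a hand-written phylink .validate callback: describe the MAC declaratively at probe time by filling mac_capabilities and the supported_interfaces bitmap, then plug in phylink_generic_validate(), which derives the supported link modes from that description. Condensed from the hunks above:

/* in probe: declare what the MAC can do */
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				      MAC_10FD | MAC_100FD | MAC_1000FD;
__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);

/* in the ops table: let phylink do the filtering */
static const struct phylink_mac_ops axienet_phylink_ops = {
	.validate = phylink_generic_validate,
	/* ... remaining callbacks unchanged ... */
};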
- res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(dev, "no IRQ found\n"); - rc = -ENXIO; + rc = platform_get_irq(ofdev, 0); + if (rc < 0) goto error; - } - ndev->irq = res->start; + ndev->irq = rc; res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); lp->base_addr = devm_ioremap_resource(&ofdev->dev, res); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 65fdad1107fc..df77a22d1b81 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -382,9 +382,6 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ - return -EINVAL; - ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); if (ret) return ret; diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c index 107039056511..145767d98445 100644 --- a/drivers/net/fddi/skfp/hwmtm.c +++ b/drivers/net/fddi/skfp/hwmtm.c @@ -38,10 +38,10 @@ ------------------------------------------------------------- */ #ifdef COMMON_MB_POOL -static SMbuf *mb_start = 0 ; -static SMbuf *mb_free = 0 ; +static SMbuf *mb_start; +static SMbuf *mb_free; static int mb_init = FALSE ; -static int call_count = 0 ; +static int call_count; #endif /* diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index 6b68a53f1b38..72c31f0013ad 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -1846,10 +1846,10 @@ void smt_swap_para(struct smt_header *sm, int len, int direction) } } + static void smt_string_swap(char *data, const char *format, int len) { const char *open_paren = NULL ; - int x ; while (len > 0 && *format) { switch (*format) { @@ -1876,19 +1876,13 @@ static void smt_string_swap(char *data, const char *format, int len) len-- ; break ; case 's' : - x = data[0] ; - data[0] = data[1] ; - data[1] = x ; + swap(data[0], data[1]) ; data += 2 ; len -= 2 ; break ; case 'l' : - x = data[0] ; - data[0] = data[3] ; - data[3] = x ; - x = data[1] ; - data[1] = data[2] ; - data[2] = x ; + swap(data[0], data[3]) ; + swap(data[1], data[2]) ; data += 4 ; len -= 4 ; break ; diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b06c17ac8d4e..ebd287039a54 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1262,6 +1262,11 @@ static int fjes_probe(struct platform_device *plat_dev) hw->hw_res.start = res->start; hw->hw_res.size = resource_size(res); hw->hw_res.irq = platform_get_irq(plat_dev, 0); + if (hw->hw_res.irq < 0) { + err = hw->hw_res.irq; + goto err_free_control_wq; + } + err = fjes_hw_init(&adapter->hw); if (err) goto err_free_control_wq; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 1ab94b5f9bbf..c1fdd721a730 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -17,6 +17,7 @@ #include <net/gro_cells.h> #include <net/rtnetlink.h> #include <net/geneve.h> +#include <net/gro.h> #include <net/protocol.h> #define GENEVE_NETDEV_VER "0.6" @@ -516,18 +517,15 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk, type = gh->proto_type; - rcu_read_lock(); ptype = gro_find_receive_by_type(type); if (!ptype) - goto out_unlock; + goto out; skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; -out_unlock: - rcu_read_unlock(); out: skb_gro_flush_final(skb, pp, flush); @@ -547,13 
+545,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, gh_len = geneve_hlen(gh); type = gh->proto_type; - rcu_read_lock(); ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); - rcu_read_unlock(); - skb_set_inner_mac_header(skb, nhoff + gh_len); return err; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index bfdf89e54752..8a19a06b505d 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; - dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index b0edb91bb10a..8297411e87ea 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -30,6 +30,7 @@ /*****************************************************************************/ #include <linux/capability.h> +#include <linux/compat.h> #include <linux/module.h> #include <linux/types.h> #include <linux/net.h> diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index e2b332b54f06..edde9c3ae12b 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -31,6 +31,8 @@ #define AX_MTU 236 +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP/KISS protocol characters. */ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ @@ -792,14 +794,14 @@ static void mkiss_close(struct tty_struct *tty) */ netif_stop_queue(ax->dev); - ax->tty = NULL; - unregister_netdev(ax->dev); /* Free all AX25 frame buffers after unreg. */ kfree(ax->rbuff); kfree(ax->xbuff); + ax->tty = NULL; + free_netdev(ax->dev); } diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 3d59dac063ac..f90830d3dfa6 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -148,6 +148,7 @@ /* ----------------------------------------------------------------------- */ +#include <linux/compat.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/signal.h> diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 396bc1c204e6..5086cd07d1ed 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -155,7 +155,7 @@ static void free_netvsc_device(struct rcu_head *head) kfree(nvdev->extension); vfree(nvdev->recv_buf); vfree(nvdev->send_buf); - kfree(nvdev->send_section_map); + bitmap_free(nvdev->send_section_map); for (i = 0; i < VRSS_CHANNEL_MAX; i++) { xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq); @@ -336,7 +336,6 @@ static int netvsc_init_buf(struct hv_device *device, struct net_device *ndev = hv_get_drvdata(device); struct nvsp_message *init_packet; unsigned int buf_size; - size_t map_words; int i, ret = 0; /* Get receive buffer area. */ @@ -528,10 +527,9 @@ static int netvsc_init_buf(struct hv_device *device, net_device->send_section_size, net_device->send_section_cnt); /* Setup state for managing the send buffer. 
*/ - map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG); - - net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL); - if (net_device->send_section_map == NULL) { + net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt, + GFP_KERNEL); + if (!net_device->send_section_map) { ret = -ENOMEM; goto cleanup; } diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index aa877da113f8..7856905414eb 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -68,7 +68,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(ndev, prog, act); } out: diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7e66ae1d2a59..efa963b7af54 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1858,7 +1858,9 @@ static void __netvsc_get_ringparam(struct netvsc_device *nvdev, } static void netvsc_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); @@ -1870,7 +1872,9 @@ static void netvsc_get_ringparam(struct net_device *ndev, } static int netvsc_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 23ee0b14cbfa..2f5e7b31032a 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe, ret = usb_control_msg(usb_dev, pipe, request, requesttype, value, index, data, size, timeout); - if (ret < 0) { + if (ret < size) { + ret = ret < 0 ? 
ret : -ENODATA; + atusb->err = ret; dev_err(&usb_dev->dev, "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n", @@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb) if (!build) return -ENOMEM; - ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), - ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, - build, ATUSB_BUILD_SIZE, 1000); + /* We cannot call atusb_control_msg() here, since this request may read various length data */ + ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD, + ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000); if (ret >= 0) { build[ret] = 0; dev_info(&usb_dev->dev, "Firmware: build %s\n", build); diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 31f522b8e54e..1c64d5347b8e 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> +#include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -36,30 +37,55 @@ #include <net/net_namespace.h> #define TX_Q_LIMIT 32 + +struct ifb_q_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync sync; +}; + struct ifb_q_private { struct net_device *dev; struct tasklet_struct ifb_tasklet; int tasklet_pending; int txqnum; struct sk_buff_head rq; - u64 rx_packets; - u64 rx_bytes; - struct u64_stats_sync rsync; - - struct u64_stats_sync tsync; - u64 tx_packets; - u64 tx_bytes; struct sk_buff_head tq; + struct ifb_q_stats rx_stats; + struct ifb_q_stats tx_stats; } ____cacheline_aligned_in_smp; struct ifb_dev_private { struct ifb_q_private *tx_private; }; +/* For ethtools stats. */ +struct ifb_q_stats_desc { + char desc[ETH_GSTRING_LEN]; + size_t offset; +}; + +#define IFB_Q_STAT(m) offsetof(struct ifb_q_stats, m) + +static const struct ifb_q_stats_desc ifb_q_stats_desc[] = { + { "packets", IFB_Q_STAT(packets) }, + { "bytes", IFB_Q_STAT(bytes) }, +}; + +#define IFB_Q_STATS_LEN ARRAY_SIZE(ifb_q_stats_desc) + static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); +static void ifb_update_q_stats(struct ifb_q_stats *stats, int len) +{ + u64_stats_update_begin(&stats->sync); + stats->packets++; + stats->bytes += len; + u64_stats_update_end(&stats->sync); +} + static void ifb_ri_tasklet(struct tasklet_struct *t) { struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet); @@ -83,10 +109,7 @@ static void ifb_ri_tasklet(struct tasklet_struct *t) #endif nf_skip_egress(skb, true); - u64_stats_update_begin(&txp->tsync); - txp->tx_packets++; - txp->tx_bytes += skb->len; - u64_stats_update_end(&txp->tsync); + ifb_update_q_stats(&txp->tx_stats, skb->len); rcu_read_lock(); skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif); @@ -139,18 +162,18 @@ static void ifb_stats64(struct net_device *dev, for (i = 0; i < dev->num_tx_queues; i++,txp++) { do { - start = u64_stats_fetch_begin_irq(&txp->rsync); - packets = txp->rx_packets; - bytes = txp->rx_bytes; - } while (u64_stats_fetch_retry_irq(&txp->rsync, start)); + start = u64_stats_fetch_begin_irq(&txp->rx_stats.sync); + packets = txp->rx_stats.packets; + bytes = txp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&txp->rx_stats.sync, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; do { - start = u64_stats_fetch_begin_irq(&txp->tsync); - packets = txp->tx_packets; - bytes = txp->tx_bytes; - } while (u64_stats_fetch_retry_irq(&txp->tsync, start)); + start = 
u64_stats_fetch_begin_irq(&txp->tx_stats.sync); + packets = txp->tx_stats.packets; + bytes = txp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&txp->tx_stats.sync, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } @@ -173,14 +196,83 @@ static int ifb_dev_init(struct net_device *dev) txp->dev = dev; __skb_queue_head_init(&txp->rq); __skb_queue_head_init(&txp->tq); - u64_stats_init(&txp->rsync); - u64_stats_init(&txp->tsync); + u64_stats_init(&txp->rx_stats.sync); + u64_stats_init(&txp->tx_stats.sync); tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet); netif_tx_start_queue(netdev_get_tx_queue(dev, i)); } return 0; } +static void ifb_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + u8 *p = buf; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < dev->real_num_rx_queues; i++) + for (j = 0; j < IFB_Q_STATS_LEN; j++) + ethtool_sprintf(&p, "rx_queue_%u_%.18s", + i, ifb_q_stats_desc[j].desc); + + for (i = 0; i < dev->real_num_tx_queues; i++) + for (j = 0; j < IFB_Q_STATS_LEN; j++) + ethtool_sprintf(&p, "tx_queue_%u_%.18s", + i, ifb_q_stats_desc[j].desc); + + break; + } +} + +static int ifb_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IFB_Q_STATS_LEN * (dev->real_num_rx_queues + + dev->real_num_tx_queues); + default: + return -EOPNOTSUPP; + } +} + +static void ifb_fill_stats_data(u64 **data, + struct ifb_q_stats *q_stats) +{ + void *stats_base = (void *)q_stats; + unsigned int start; + size_t offset; + int j; + + do { + start = u64_stats_fetch_begin_irq(&q_stats->sync); + for (j = 0; j < IFB_Q_STATS_LEN; j++) { + offset = ifb_q_stats_desc[j].offset; + (*data)[j] = *(u64 *)(stats_base + offset); + } + } while (u64_stats_fetch_retry_irq(&q_stats->sync, start)); + + *data += IFB_Q_STATS_LEN; +} + +static void ifb_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp; + int i; + + for (i = 0; i < dev->real_num_rx_queues; i++) { + txp = dp->tx_private + i; + ifb_fill_stats_data(&data, &txp->rx_stats); + } + + for (i = 0; i < dev->real_num_tx_queues; i++) { + txp = dp->tx_private + i; + ifb_fill_stats_data(&data, &txp->tx_stats); + } +} + static const struct net_device_ops ifb_netdev_ops = { .ndo_open = ifb_open, .ndo_stop = ifb_close, @@ -190,6 +282,12 @@ static const struct net_device_ops ifb_netdev_ops = { .ndo_init = ifb_dev_init, }; +static const struct ethtool_ops ifb_ethtool_ops = { + .get_strings = ifb_get_strings, + .get_sset_count = ifb_get_sset_count, + .get_ethtool_stats = ifb_get_ethtool_stats, +}; + #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \ @@ -213,6 +311,7 @@ static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. */ dev->netdev_ops = &ifb_netdev_ops; + dev->ethtool_ops = &ifb_ethtool_ops; /* Fill in device structure with ethernet-generic values. 
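An illustrative sketch, not part of the patch: the reader side of the u64_stats_sync pattern that ifb now groups into struct ifb_q_stats. The helper name is hypothetical; the retry loop is the same begin/retry discipline used by ifb_stats64() and ifb_fill_stats_data() above.

/* Hypothetical helper, illustrating the reader pattern only */
static void ifb_read_q_stats(const struct ifb_q_stats *q,
			     u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&q->sync);
		*packets = q->packets;
		*bytes = q->bytes;
	} while (u64_stats_fetch_retry_irq(&q->sync, start));
}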
*/ ether_setup(dev); @@ -241,10 +340,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) struct ifb_dev_private *dp = netdev_priv(dev); struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb); - u64_stats_update_begin(&txp->rsync); - txp->rx_packets++; - txp->rx_bytes += skb->len; - u64_stats_update_end(&txp->rsync); + ifb_update_q_stats(&txp->rx_stats, skb->len); if (!skb->redirected || !skb->skb_iif) { dev_kfree_skb(skb); diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index a2fcdb1abdb9..bc981043cc80 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -93,6 +93,7 @@ #define GSI_CHANNEL_STOP_RETRIES 10 #define GSI_CHANNEL_MODEM_HALT_RETRIES 10 +#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */ #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */ #define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */ @@ -339,10 +340,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset) * completion to be signaled. Returns true if the command completes * or false if it times out. */ -static bool -gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion) +static bool gsi_command(struct gsi *gsi, u32 reg, u32 val) { unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT); + struct completion *completion = &gsi->completion; reinit_completion(completion); @@ -366,8 +367,6 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id, enum gsi_evt_cmd_opcode opcode) { - struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; - struct completion *completion = &evt_ring->completion; struct device *dev = gsi->dev; bool timeout; u32 val; @@ -378,7 +377,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id, val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK); val |= u32_encode_bits(opcode, EV_OPCODE_FMASK); - timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion); + timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val); gsi_irq_ev_ctrl_disable(gsi); @@ -478,7 +477,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) static void gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) { - struct completion *completion = &channel->completion; u32 channel_id = gsi_channel_id(channel); struct gsi *gsi = channel->gsi; struct device *dev = gsi->dev; @@ -490,7 +488,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) val = u32_encode_bits(channel_id, CH_CHID_FMASK); val |= u32_encode_bits(opcode, CH_OPCODE_FMASK); - timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion); + timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val); gsi_irq_ch_ctrl_disable(gsi); @@ -1074,13 +1072,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi) while (channel_mask) { u32 channel_id = __ffs(channel_mask); - struct gsi_channel *channel; channel_mask ^= BIT(channel_id); - channel = &gsi->channel[channel_id]; - - complete(&channel->completion); + complete(&gsi->completion); } } @@ -1094,13 +1089,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi) while (event_mask) { u32 evt_ring_id = __ffs(event_mask); - struct gsi_evt_ring *evt_ring; event_mask ^= BIT(evt_ring_id); - evt_ring = &gsi->evt_ring[evt_ring_id]; - - complete(&evt_ring->completion); + complete(&gsi->completion); } } @@ -1110,7 +1102,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) { if (code == GSI_OUT_OF_RESOURCES) { 
dev_err(gsi->dev, "channel %u out of resources\n", channel_id); - complete(&gsi->channel[channel_id].completion); + complete(&gsi->completion); return; } @@ -1127,7 +1119,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; u32 channel_id = gsi_channel_id(evt_ring->channel); - complete(&evt_ring->completion); + complete(&gsi->completion); dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", channel_id); return; @@ -1171,18 +1163,23 @@ static void gsi_isr_gp_int1(struct gsi *gsi) u32 result; u32 val; - /* This interrupt is used to handle completions of the two GENERIC - * GSI commands. We use these to allocate and halt channels on - * the modem's behalf due to a hardware quirk on IPA v4.2. Once - * allocated, the modem "owns" these channels, and as a result we - * have no way of knowing the channel's state at any given time. + /* This interrupt is used to handle completions of GENERIC GSI + * commands. We use these to allocate and halt channels on the + * modem's behalf due to a hardware quirk on IPA v4.2. The modem + * "owns" channels even when the AP allocates them, and have no + * way of knowing whether a modem channel's state has been changed. + * + * We also use GENERIC commands to enable/disable channel flow + * control for IPA v4.2+. * * It is recommended that we halt the modem channels we allocated * when shutting down, but it's possible the channel isn't running * at the time we issue the HALT command. We'll get an error in * that case, but it's harmless (the channel is already halted). + * Similarly, we could get an error back when updating flow control + * on a channel because it's not in the proper state. * - * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error + * In either case, we silently ignore a CHANNEL_NOT_RUNNING error * if we receive it. */ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); @@ -1648,19 +1645,25 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); } +/* We use generic commands only to operate on modem channels. We don't have + * the ability to determine channel state for a modem channel, so we simply + * issue the command and wait for it to complete. + */ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, - enum gsi_generic_cmd_opcode opcode) + enum gsi_generic_cmd_opcode opcode, + u8 params) { - struct completion *completion = &gsi->completion; bool timeout; u32 val; - /* The error global interrupt type is always enabled (until we - * teardown), so we won't change that. A generic EE command - * completes with a GSI global interrupt of type GP_INT1. We - * only perform one generic command at a time (to allocate or - * halt a modem channel) and only from this function. So we - * enable the GP_INT1 IRQ type here while we're expecting it. + /* The error global interrupt type is always enabled (until we tear + * down), so we will keep it enabled. + * + * A generic EE command completes with a GSI global interrupt of + * type GP_INT1. We only perform one generic command at a time + * (to allocate, halt, or enable/disable flow control on a modem + * channel), and only from this function. So we enable the GP_INT1 + * IRQ type here, and disable it again after the command completes. 
*/ val = BIT(ERROR_INT) | BIT(GP_INT1); iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); @@ -1674,8 +1677,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK); val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); + val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK); - timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion); + timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val); /* Disable the GP_INT1 IRQ type again */ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); @@ -1692,7 +1696,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) { return gsi_generic_command(gsi, channel_id, - GSI_GENERIC_ALLOCATE_CHANNEL); + GSI_GENERIC_ALLOCATE_CHANNEL, 0); } static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) @@ -1702,7 +1706,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) do ret = gsi_generic_command(gsi, channel_id, - GSI_GENERIC_HALT_CHANNEL); + GSI_GENERIC_HALT_CHANNEL, 0); while (ret == -EAGAIN && retries--); if (ret) @@ -1710,6 +1714,32 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) ret, channel_id); } +/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */ +void +gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable) +{ + u32 retries = 0; + u32 command; + int ret; + + command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL + : GSI_GENERIC_DISABLE_FLOW_CONTROL; + /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable + * is underway. In this case we need to retry the command. + */ + if (!enable && gsi->version >= IPA_VERSION_4_11) + retries = GSI_CHANNEL_MODEM_FLOW_RETRIES; + + do + ret = gsi_generic_command(gsi, channel_id, command, 0); + while (ret == -EAGAIN && retries--); + + if (ret) + dev_err(gsi->dev, + "error %d %sabling modem channel %u flow control\n", + ret, enable ?
"en" : "dis", channel_id); +} + /* Setup function for channels */ static int gsi_channel_setup(struct gsi *gsi) { @@ -1975,18 +2005,6 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel) gsi_evt_ring_id_free(gsi, evt_ring_id); } -/* Init function for event rings; there is no gsi_evt_ring_exit() */ -static void gsi_evt_ring_init(struct gsi *gsi) -{ - u32 evt_ring_id = 0; - - gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); - gsi->ieob_enabled_bitmap = 0; - do - init_completion(&gsi->evt_ring[evt_ring_id].completion); - while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX); -} - static bool gsi_channel_data_valid(struct gsi *gsi, const struct ipa_gsi_endpoint_data *data) { @@ -2069,7 +2087,6 @@ static int gsi_channel_init_one(struct gsi *gsi, channel->tlv_count = data->channel.tlv_count; channel->tre_count = tre_count; channel->event_count = data->channel.event_count; - init_completion(&channel->completion); ret = gsi_channel_evt_ring_init(channel); if (ret) @@ -2129,7 +2146,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count, /* IPA v4.2 requires the AP to allocate channels for the modem */ modem_alloc = gsi->version == IPA_VERSION_4_2; - gsi_evt_ring_init(gsi); /* No matching exit required */ + gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); + gsi->ieob_enabled_bitmap = 0; /* The endpoint data array is indexed by endpoint name */ for (i = 0; i < count; i++) { diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 88b80dc3db79..9cc657658811 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -101,6 +101,7 @@ enum gsi_channel_state { GSI_CHANNEL_STATE_STARTED = 0x2, GSI_CHANNEL_STATE_STOPPED = 0x3, GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4, + GSI_CHANNEL_STATE_FLOW_CONTROLLED = 0x5, /* IPA v4.2-v4.9 */ GSI_CHANNEL_STATE_ERROR = 0xf, }; @@ -114,8 +115,6 @@ struct gsi_channel { u16 tre_count; u16 event_count; - struct completion completion; /* signals channel command completion */ - struct gsi_ring tre_ring; u32 evt_ring_id; @@ -141,28 +140,27 @@ enum gsi_evt_ring_state { struct gsi_evt_ring { struct gsi_channel *channel; - struct completion completion; /* signals event ring state changes */ struct gsi_ring ring; }; struct gsi { struct device *dev; /* Same as IPA device */ enum ipa_version version; - struct net_device dummy_dev; /* needed for NAPI */ void __iomem *virt_raw; /* I/O mapped address range */ void __iomem *virt; /* Adjusted for most registers */ u32 irq; u32 channel_count; u32 evt_ring_count; - struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX]; - struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX]; u32 event_bitmap; /* allocated event rings */ u32 modem_channel_bitmap; /* modem channels to allocate */ u32 type_enabled_bitmap; /* GSI IRQ types enabled */ u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */ - struct completion completion; /* for global EE commands */ int result; /* Negative errno (generic commands) */ + struct completion completion; /* Signals GSI command completion */ struct mutex mutex; /* protects commands, programming */ + struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX]; + struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX]; + struct net_device dummy_dev; /* needed for NAPI */ }; /** @@ -219,6 +217,15 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id); int gsi_channel_stop(struct gsi *gsi, u32 channel_id); /** + * gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+) + * @gsi: GSI pointer returned by gsi_setup() + * @channel_id: Modem TX channel 
to control + * @enable: Whether to enable flow control (i.e., prevent flow) + */ +void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, + bool enable); + +/** * gsi_channel_reset() - Reset an allocated GSI channel * @gsi: GSI pointer * @channel_id: Channel to be reset diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index bf9593d9eaea..8906f4381032 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -313,11 +313,15 @@ enum gsi_evt_cmd_opcode { #define GENERIC_OPCODE_FMASK GENMASK(4, 0) #define GENERIC_CHID_FMASK GENMASK(9, 5) #define GENERIC_EE_FMASK GENMASK(13, 10) +#define GENERIC_PARAMS_FMASK GENMASK(31, 24) /* IPA v4.11+ */ /** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */ enum gsi_generic_cmd_opcode { GSI_GENERIC_HALT_CHANNEL = 0x1, GSI_GENERIC_ALLOCATE_CHANNEL = 0x2, + GSI_GENERIC_ENABLE_FLOW_CONTROL = 0x3, /* IPA v4.2+ */ + GSI_GENERIC_DISABLE_FLOW_CONTROL = 0x4, /* IPA v4.2+ */ + GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */ }; /* The next register is present for IPA v3.5.1 and above */ diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index cff51731195a..d57472ea077f 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa) wait_for_completion(&ipa->completion); } -void ipa_cmd_pipeline_clear(struct ipa *ipa) -{ - u32 count = ipa_cmd_pipeline_clear_count(); - struct gsi_trans *trans; - - trans = ipa_cmd_trans_alloc(ipa, count); - if (trans) { - ipa_cmd_pipeline_clear_add(trans); - gsi_trans_commit_wait(trans); - ipa_cmd_pipeline_clear_wait(ipa); - } else { - dev_err(&ipa->pdev->dev, - "error allocating %u entry tag transaction\n", count); - } -} - static struct ipa_cmd_info * ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) { diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 69cd085d427d..05ed7e42e184 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -164,12 +164,6 @@ u32 ipa_cmd_pipeline_clear_count(void); void ipa_cmd_pipeline_clear_wait(struct ipa *ipa); /** - * ipa_cmd_pipeline_clear() - Clear the hardware pipeline - * @ipa: - IPA pointer - */ -void ipa_cmd_pipeline_clear(struct ipa *ipa); - -/** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint * @ipa: IPA pointer * @tre_count: Number of elements in the transaction diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c index e62ab9c3ac67..2da2c4194f2e 100644 --- a/drivers/net/ipa/ipa_data-v4.5.c +++ b/drivers/net/ipa/ipa_data-v4.5.c @@ -420,15 +420,10 @@ static const struct ipa_mem_data ipa_mem_data = { /* Interconnect rates are in 1000 byte/second units */ static const struct ipa_interconnect_data ipa_interconnect_data[] = { { - .name = "memory-a", + .name = "memory", .peak_bandwidth = 600000, /* 600 MBps */ .average_bandwidth = 150000, /* 150 MBps */ }, - { - .name = "memory-b", - .peak_bandwidth = 1804000, /* 1.804 GBps */ - .average_bandwidth = 150000, /* 150 MBps */ - }, /* Average rate is unused for the next two interconnects */ { .name = "imem", diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 5528d97110d5..49d9a077d037 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -237,7 +237,8 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, } /* suspend_delay represents suspend for RX, delay for TX endpoints. 
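A worked example of the GENERIC_CMD field packing defined by the masks above; this is editorial, and the channel and EE numbers are made up purely for illustration:

/*
 * Editorial worked example, with hypothetical values: opcode
 * GSI_GENERIC_ENABLE_FLOW_CONTROL (0x3), channel_id 11, an EE
 * value of 1, params 0.
 *
 *   u32_encode_bits(0x3, GENMASK(4, 0))   = 0x3 << 0  = 0x00000003
 *   u32_encode_bits(11,  GENMASK(9, 5))   = 11  << 5  = 0x00000160
 *   u32_encode_bits(1,   GENMASK(13, 10)) = 1   << 10 = 0x00000400
 *   u32_encode_bits(0,   GENMASK(31, 24)) = 0   << 24 = 0x00000000
 *                                          OR together = 0x00000563
 */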
- * Note that suspend is not supported starting with IPA v4.0. + * Note that suspend is not supported starting with IPA v4.0, and + * delay mode should not be used starting with IPA v4.2. */ static bool ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) @@ -248,11 +249,8 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) u32 mask; u32 val; - /* Suspend is not supported for IPA v4.0+. Delay doesn't work - * correctly on IPA v4.2. - */ if (endpoint->toward_ipa) - WARN_ON(ipa->version == IPA_VERSION_4_2); + WARN_ON(ipa->version >= IPA_VERSION_4_2); else WARN_ON(ipa->version >= IPA_VERSION_4_0); @@ -270,15 +268,15 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) return state; } -/* We currently don't care what the previous state was for delay mode */ +/* We don't care what the previous state was for delay mode */ static void ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) { + /* Delay mode should not be used for IPA v4.2+ */ + WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); WARN_ON(!endpoint->toward_ipa); - /* Delay mode doesn't work properly for IPA v4.2 */ - if (endpoint->ipa->version != IPA_VERSION_4_2) - (void)ipa_endpoint_init_ctrl(endpoint, enable); + (void)ipa_endpoint_init_ctrl(endpoint, enable); } static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) @@ -355,26 +353,29 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) return suspended; } -/* Enable or disable delay or suspend mode on all modem endpoints */ +/* Put all modem RX endpoints into suspend mode, and stop transmission + * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is + * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow + * control instead. + */ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) { u32 endpoint_id; - /* DELAY mode doesn't work correctly on IPA v4.2 */ - if (ipa->version == IPA_VERSION_4_2) - return; - for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) { struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; if (endpoint->ee_id != GSI_EE_MODEM) continue; - /* Set TX delay mode or RX suspend mode */ - if (endpoint->toward_ipa) + if (!endpoint->toward_ipa) + (void)ipa_endpoint_program_suspend(endpoint, enable); + else if (ipa->version < IPA_VERSION_4_2) ipa_endpoint_program_delay(endpoint, enable); else - (void)ipa_endpoint_program_suspend(endpoint, enable); + gsi_modem_channel_flow_control(&ipa->gsi, + endpoint->channel_id, + enable); } } @@ -853,13 +854,14 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, u32 offset; u32 val; + /* This should only be changed when HOL_BLOCK_EN is disabled */ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); val = hol_block_timer_val(ipa, microseconds); iowrite32(val, ipa->reg_virt + offset); } static void -ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) +ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable) { u32 endpoint_id = endpoint->endpoint_id; u32 offset; @@ -868,6 +870,22 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) val = enable ? 
HOL_BLOCK_EN_FMASK : 0; offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); iowrite32(val, endpoint->ipa->reg_virt + offset); + /* When enabling, the register must be written twice for IPA v4.5+ */ + if (enable && endpoint->ipa->version >= IPA_VERSION_4_5) + iowrite32(val, endpoint->ipa->reg_virt + offset); +} + +/* Assumes HOL_BLOCK is in disabled state */ +static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, + u32 microseconds) +{ + ipa_endpoint_init_hol_block_timer(endpoint, microseconds); + ipa_endpoint_init_hol_block_en(endpoint, true); +} + +static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint) +{ + ipa_endpoint_init_hol_block_en(endpoint, false); } void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) @@ -880,8 +898,8 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) continue; - ipa_endpoint_init_hol_block_timer(endpoint, 0); - ipa_endpoint_init_hol_block_enable(endpoint, true); + ipa_endpoint_init_hol_block_disable(endpoint); + ipa_endpoint_init_hol_block_enable(endpoint, 0); } } @@ -1136,18 +1154,19 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, { struct sk_buff *skb; + if (!endpoint->netdev) + return; + skb = __dev_alloc_skb(len, GFP_ATOMIC); - if (skb) { - skb_put(skb, len); - memcpy(skb->data, data, len); - skb->truesize += extra; - } + if (!skb) + return; + + /* Copy the data into the socket buffer and receive it */ + skb_put(skb, len); + memcpy(skb->data, data, len); + skb->truesize += extra; - /* Now receive it, or drop it if there's no netdev */ - if (endpoint->netdev) - ipa_modem_skb_rx(endpoint->netdev, skb); - else if (skb) - dev_kfree_skb_any(skb); + ipa_modem_skb_rx(endpoint->netdev, skb); } static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, @@ -1514,10 +1533,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) static void ipa_endpoint_program(struct ipa_endpoint *endpoint) { - if (endpoint->toward_ipa) - ipa_endpoint_program_delay(endpoint, false); - else + if (endpoint->toward_ipa) { + /* Newer versions of IPA use GSI channel flow control + * instead of endpoint DELAY mode to prevent sending data. + * Flow control is disabled for newly-allocated channels, + * and we can assume flow control is not (ever) enabled + * for AP TX channels. 
+ */ + if (endpoint->ipa->version < IPA_VERSION_4_2) + ipa_endpoint_program_delay(endpoint, false); + } else { + /* Ensure suspend mode is off on all AP RX endpoints */ (void)ipa_endpoint_program_suspend(endpoint, false); + } ipa_endpoint_init_cfg(endpoint); ipa_endpoint_init_nat(endpoint); ipa_endpoint_init_hdr(endpoint); @@ -1525,6 +1553,8 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) ipa_endpoint_init_hdr_metadata_mask(endpoint); ipa_endpoint_init_mode(endpoint); ipa_endpoint_init_aggr(endpoint); + if (!endpoint->toward_ipa) + ipa_endpoint_init_hol_block_disable(endpoint); ipa_endpoint_init_deaggr(endpoint); ipa_endpoint_init_rsrc_grp(endpoint); ipa_endpoint_init_seq(endpoint); @@ -1631,8 +1661,6 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev); - ipa_cmd_pipeline_clear(ipa); - ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); } diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index cdfa98a76e1f..3757ce3de2c5 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -28,6 +28,7 @@ #include "ipa_reg.h" #include "ipa_mem.h" #include "ipa_table.h" +#include "ipa_smp2p.h" #include "ipa_modem.h" #include "ipa_uc.h" #include "ipa_interrupt.h" @@ -733,7 +734,7 @@ static int ipa_probe(struct platform_device *pdev) if (ret) goto err_endpoint_exit; - ret = ipa_modem_init(ipa, modem_init); + ret = ipa_smp2p_init(ipa, modem_init); if (ret) goto err_table_exit; @@ -775,7 +776,7 @@ err_deconfig: ipa_deconfig(ipa); err_power_put: pm_runtime_put_noidle(dev); - ipa_modem_exit(ipa); + ipa_smp2p_exit(ipa); err_table_exit: ipa_table_exit(ipa); err_endpoint_exit: @@ -801,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; int ret; + /* Prevent the modem from triggering a call to ipa_setup(). This + * also ensures a modem-initiated setup that's underway completes. 
+ */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (WARN_ON(ret < 0)) goto out_power_put; @@ -821,7 +827,7 @@ static int ipa_remove(struct platform_device *pdev) ipa_deconfig(ipa); out_power_put: pm_runtime_put_noidle(dev); - ipa_modem_exit(ipa); + ipa_smp2p_exit(ipa); ipa_table_exit(ipa); ipa_endpoint_exit(ipa); gsi_exit(&ipa->gsi); diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index 4337b0920d3d..1e9eae208e44 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -266,9 +266,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data) } /* Now see if any required regions are not defined */ - for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT); - mem_id < IPA_MEM_COUNT; - mem_id = find_next_zero_bit(regions, IPA_MEM_COUNT, mem_id + 1)) { + for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) { if (ipa_mem_id_required(ipa, mem_id)) dev_err(dev, "required memory region %u missing\n", mem_id); diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index ad116bcc0580..27d87097433f 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa) if (state != IPA_MODEM_STATE_RUNNING) return -EBUSY; - /* Prevent the modem from triggering a call to ipa_setup() */ - ipa_smp2p_disable(ipa); - /* Clean up the netdev and endpoints if it was started */ if (netdev) { struct ipa_priv *priv = netdev_priv(netdev); @@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; int ret; + /* Prevent the modem from triggering a call to ipa_setup() */ + ipa_smp2p_irq_disable_setup(ipa); + ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "error %d getting power to handle crash\n", ret); @@ -442,16 +442,6 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action, return NOTIFY_OK; } -int ipa_modem_init(struct ipa *ipa, bool modem_init) -{ - return ipa_smp2p_init(ipa, modem_init); -} - -void ipa_modem_exit(struct ipa *ipa) -{ - ipa_smp2p_exit(ipa); -} - int ipa_modem_config(struct ipa *ipa) { void *notifier; diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h index 5e6e3d234454..e64ccc2402e9 100644 --- a/drivers/net/ipa/ipa_modem.h +++ b/drivers/net/ipa/ipa_modem.h @@ -18,9 +18,6 @@ void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb); void ipa_modem_suspend(struct net_device *netdev); void ipa_modem_resume(struct net_device *netdev); -int ipa_modem_init(struct ipa *ipa, bool modem_init); -void ipa_modem_exit(struct ipa *ipa); - int ipa_modem_config(struct ipa *ipa); void ipa_modem_deconfig(struct ipa *ipa); diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c index e3da95d69409..06cec7199382 100644 --- a/drivers/net/ipa/ipa_resource.c +++ b/drivers/net/ipa/ipa_resource.c @@ -52,7 +52,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa, return false; } - group_count = data->rsrc_group_src_count; + group_count = data->rsrc_group_dst_count; if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX) return false; diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index df7639c39d71..211233612039 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -53,7 +53,7 @@ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready * @power_on: Whether IPA power is on * @notified: Whether modem has been notified of power state - * @disabled: Whether setup 
ready interrupt handling is disabled + * @setup_disabled: Whether setup ready interrupt handler is disabled * @mutex: Mutex protecting ready-interrupt/shutdown interlock * @panic_notifier: Panic notifier structure */ @@ -67,7 +67,7 @@ struct ipa_smp2p { u32 setup_ready_irq; bool power_on; bool notified; - bool disabled; + bool setup_disabled; struct mutex mutex; struct notifier_block panic_notifier; }; @@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) struct device *dev; int ret; - mutex_lock(&smp2p->mutex); - - if (smp2p->disabled) - goto out_mutex_unlock; - smp2p->disabled = true; /* If any others arrive, ignore them */ + /* Ignore any (spurious) interrupts received after the first */ + if (smp2p->ipa->setup_complete) + return IRQ_HANDLED; /* Power needs to be active for setup */ dev = &smp2p->ipa->pdev->dev; @@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) out_power_put: pm_runtime_mark_last_busy(dev); (void)pm_runtime_put_autosuspend(dev); -out_mutex_unlock: - mutex_unlock(&smp2p->mutex); return IRQ_HANDLED; } @@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa) kfree(smp2p); } -void ipa_smp2p_disable(struct ipa *ipa) +void ipa_smp2p_irq_disable_setup(struct ipa *ipa) { struct ipa_smp2p *smp2p = ipa->smp2p; @@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa) mutex_lock(&smp2p->mutex); - smp2p->disabled = true; + if (!smp2p->setup_disabled) { + disable_irq(smp2p->setup_ready_irq); + smp2p->setup_disabled = true; + } mutex_unlock(&smp2p->mutex); } diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 99a956789638..59cee31a7383 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init); void ipa_smp2p_exit(struct ipa *ipa); /** - * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling + * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt * @ipa: IPA pointer * - * Prevent handling of the "setup ready" interrupt from the modem. - * This is used before initiating shutdown of the driver. + * Disable the "ipa-setup-ready" interrupt from the modem. */ -void ipa_smp2p_disable(struct ipa *ipa); +void ipa_smp2p_irq_disable_setup(struct ipa *ipa); /** * ipa_smp2p_notify_reset() - Reset modem notification state diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 1da334f54944..2f5a58bfc529 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -419,21 +419,26 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id); dma_addr_t hash_addr; dma_addr_t addr; + u32 zero_offset; u16 hash_count; + u32 zero_size; u16 hash_size; u16 count; u16 size; - /* The number of filtering endpoints determines number of entries - * in the filter table. The hashed and non-hashed filter table - * will have the same number of entries. The size of the route - * table region determines the number of entries it has. - */ + /* Compute the number of table entries to initialize */ if (filter) { - /* Include one extra "slot" to hold the filter map itself */ + /* The number of filtering endpoints determines number of + * entries in the filter table; we also add one more "slot" + * to hold the bitmap itself. The size of the hashed filter + * table is either the same as the non-hashed one, or zero. + */ count = 1 + hweight32(ipa->filter_map); hash_count = hash_mem->size ? 
count : 0; } else { + /* The size of a route table region determines the number + * of entries it has. + */ count = mem->size / sizeof(__le64); hash_count = hash_mem->size / sizeof(__le64); } @@ -445,13 +450,42 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr, hash_size, hash_mem->offset, hash_addr); + if (!filter) + return; + + /* Zero the unused space in the filter table */ + zero_offset = mem->offset + size; + zero_size = mem->size - size; + ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size, + ipa->zero_addr, true); + if (!hash_size) + return; + + /* Zero the unused space in the hashed filter table */ + zero_offset = hash_mem->offset + hash_size; + zero_size = hash_mem->size - hash_size; + ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size, + ipa->zero_addr, true); } int ipa_table_setup(struct ipa *ipa) { struct gsi_trans *trans; - trans = ipa_cmd_trans_alloc(ipa, 4); + /* We will need at most 8 TREs: + * - IPv4: + * - One for route table initialization (non-hashed and hashed) + * - One for filter table initialization (non-hashed and hashed) + * - One to zero unused entries in the non-hashed filter table + * - One to zero unused entries in the hashed filter table + * - IPv6: + * - One for route table initialization (non-hashed and hashed) + * - One for filter table initialization (non-hashed and hashed) + * - One to zero unused entries in the non-hashed filter table + * - One to zero unused entries in the hashed filter table + * All platforms support at least 8 TREs in a transaction. + */ + trans = ipa_cmd_trans_alloc(ipa, 8); if (!trans) { dev_err(&ipa->pdev->dev, "no transaction for table setup\n"); return -EBUSY; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 6cd50106e611..c613900c3811 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -291,8 +291,7 @@ void ipvlan_process_multicast(struct work_struct *work) else kfree_skb(skb); } - if (dev) - dev_put(dev); + dev_put(dev); cond_resched(); } } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1d2f4e7d7324..696e245f6d00 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -100,8 +100,7 @@ static void ipvlan_port_destroy(struct net_device *dev) netdev_rx_handler_unregister(dev); cancel_work_sync(&port->wq); while ((skb = __skb_dequeue(&port->backlog)) != NULL) { - if (skb->dev) - dev_put(skb->dev); + dev_put(skb->dev); kfree_skb(skb); } ida_destroy(&port->ida); @@ -140,8 +139,8 @@ static int ipvlan_init(struct net_device *dev) dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->hw_enc_features |= dev->features; - dev->gso_max_size = phy_dev->gso_max_size; - dev->gso_max_segs = phy_dev->gso_max_segs; + netif_set_gso_max_size(dev, phy_dev->gso_max_size); + netif_set_gso_max_segs(dev, phy_dev->gso_max_segs); dev->hard_header_len = phy_dev->hard_header_len; netdev_lockdep_set_classes(dev); @@ -763,8 +762,8 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ipvlan->dev->gso_max_size = dev->gso_max_size; - ipvlan->dev->gso_max_segs = dev->gso_max_segs; + netif_set_gso_max_size(ipvlan->dev, dev->gso_max_size); + netif_set_gso_max_segs(ipvlan->dev, dev->gso_max_segs); netdev_update_features(ipvlan->dev); } break; diff --git a/drivers/net/loopback.c 
b/drivers/net/loopback.c index a1c77cc00416..ed0edf5884ef 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -44,6 +44,7 @@ #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> +#include <net/sch_generic.h> #include <net/sock.h> #include <net/checksum.h> #include <linux/if_ether.h> /* For the statistics structure. */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d2f830ec2969..6ef5f77be4d0 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -900,8 +900,8 @@ static int macvlan_init(struct net_device *dev) dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->vlan_features |= ALWAYS_ON_OFFLOADS; dev->hw_enc_features |= dev->features; - dev->gso_max_size = lowerdev->gso_max_size; - dev->gso_max_segs = lowerdev->gso_max_segs; + netif_set_gso_max_size(dev, lowerdev->gso_max_size); + netif_set_gso_max_segs(dev, lowerdev->gso_max_segs); dev->hard_header_len = lowerdev->hard_header_len; macvlan_set_lockdep_class(dev); @@ -1171,7 +1171,6 @@ static const struct net_device_ops macvlan_netdev_ops = { #endif .ndo_get_iflink = macvlan_dev_get_iflink, .ndo_features_check = passthru_features_check, - .ndo_change_proto_down = dev_change_proto_down_generic, }; void macvlan_common_setup(struct net_device *dev) @@ -1182,7 +1181,7 @@ void macvlan_common_setup(struct net_device *dev) dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); - dev->priv_flags |= IFF_UNICAST_FLT; + dev->priv_flags |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN; dev->netdev_ops = &macvlan_netdev_ops; dev->needs_free_netdev = true; dev->header_ops = &macvlan_hard_header_ops; @@ -1748,8 +1747,8 @@ static int macvlan_device_event(struct notifier_block *unused, break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { - vlan->dev->gso_max_size = dev->gso_max_size; - vlan->dev->gso_max_segs = dev->gso_max_segs; + netif_set_gso_max_size(vlan->dev, dev->gso_max_size); + netif_set_gso_max_segs(vlan->dev, dev->gso_max_segs); netdev_update_features(vlan->dev); } break; diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig index d8f966cedc89..2929471395ae 100644 --- a/drivers/net/mctp/Kconfig +++ b/drivers/net/mctp/Kconfig @@ -3,6 +3,24 @@ if MCTP menu "MCTP Device Drivers" +config MCTP_SERIAL + tristate "MCTP serial transport" + depends on TTY + select CRC_CCITT + help + This driver provides an MCTP-over-serial interface, through a + serial line-discipline, as defined by DMTF specification "DSP0253 - + MCTP Serial Transport Binding". By attaching the ldisc to a serial + device, we get a new net device to transport MCTP packets. + + This allows communication with external MCTP endpoints which use + serial as their transport. It can also be used as an easy way to + provide MCTP connectivity between virtual machines, by forwarding + data between simple virtual serial devices. + + Say y here if you need to connect to MCTP endpoints over serial. To + compile as a module, use m; the module will be called mctp-serial. 
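A minimal userspace sketch of the attachment described in the help text above; this is editorial, not part of the patch. Setting the N_MCTP line discipline on an open tty makes the driver create its mctpserialN interface. The device path is a placeholder, and N_MCTP comes from the uapi <linux/tty.h> once this series is in place.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_MCTP
#define N_MCTP 28			/* uapi value, for older headers */
#endif

int main(void)
{
	int ldisc = N_MCTP;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY); /* placeholder tty */

	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("attach N_MCTP");
		return 1;
	}
	pause();	/* the mctpserialN netdev lives while fd stays open */
	return 0;
}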
+ endmenu endif diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile index e69de29bb2d1..d32622613ce4 100644 --- a/drivers/net/mctp/Makefile +++ b/drivers/net/mctp/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c new file mode 100644 index 000000000000..eaa6fb3224bc --- /dev/null +++ b/drivers/net/mctp/mctp-serial.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Management Component Transport Protocol (MCTP) - serial transport + * binding. This driver is an implementation of the DMTF specification + * "DSP0253 - Management Component Transport Protocol (MCTP) Serial Transport + * Binding", available at: + * + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0253_1.0.0.pdf + * + * This driver provides DSP0253-type MCTP-over-serial transport using a Linux + * tty device, by setting the N_MCTP line discipline on the tty. + * + * Copyright (c) 2021 Code Construct + */ + +#include <linux/idr.h> +#include <linux/if_arp.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include <linux/tty.h> +#include <linux/workqueue.h> +#include <linux/crc-ccitt.h> + +#include <linux/mctp.h> +#include <net/mctp.h> +#include <net/pkt_sched.h> + +#define MCTP_SERIAL_MTU 68 /* base mtu (64) + mctp header */ +#define MCTP_SERIAL_FRAME_MTU (MCTP_SERIAL_MTU + 6) /* + serial framing */ + +#define MCTP_SERIAL_VERSION 0x1 /* DSP0253 defines a single version: 1 */ + +#define BUFSIZE MCTP_SERIAL_FRAME_MTU + +#define BYTE_FRAME 0x7e +#define BYTE_ESC 0x7d + +static DEFINE_IDA(mctp_serial_ida); + +enum mctp_serial_state { + STATE_IDLE, + STATE_START, + STATE_HEADER, + STATE_DATA, + STATE_ESCAPE, + STATE_TRAILER, + STATE_DONE, + STATE_ERR, +}; + +struct mctp_serial { + struct net_device *netdev; + struct tty_struct *tty; + + int idx; + + /* protects our rx & tx state machines; held during both paths */ + spinlock_t lock; + + struct work_struct tx_work; + enum mctp_serial_state txstate, rxstate; + u16 txfcs, rxfcs, rxfcs_rcvd; + unsigned int txlen, rxlen; + unsigned int txpos, rxpos; + unsigned char txbuf[BUFSIZE], + rxbuf[BUFSIZE]; +}; + +static bool needs_escape(unsigned char c) +{ + return c == BYTE_ESC || c == BYTE_FRAME; +} + +static int next_chunk_len(struct mctp_serial *dev) +{ + int i; + + /* either we have no bytes to send ... */ + if (dev->txpos == dev->txlen) + return 0; + + /* ... or the next byte to send is an escaped byte; requiring a + * single-byte chunk... + */ + if (needs_escape(dev->txbuf[dev->txpos])) + return 1; + + /* ... or we have one or more bytes up to the next escape - this chunk + * will be those non-escaped bytes, and does not include the escaped + * byte.
+ */ + for (i = 1; i + dev->txpos + 1 < dev->txlen; i++) { + if (needs_escape(dev->txbuf[dev->txpos + i + 1])) + break; + } + + return i; +} + +static int write_chunk(struct mctp_serial *dev, unsigned char *buf, int len) +{ + return dev->tty->ops->write(dev->tty, buf, len); +} + +static void mctp_serial_tx_work(struct work_struct *work) +{ + struct mctp_serial *dev = container_of(work, struct mctp_serial, + tx_work); + unsigned char c, buf[3]; + unsigned long flags; + int len, txlen; + + spin_lock_irqsave(&dev->lock, flags); + + /* txstate represents the next thing to send */ + switch (dev->txstate) { + case STATE_START: + dev->txpos = 0; + fallthrough; + case STATE_HEADER: + buf[0] = BYTE_FRAME; + buf[1] = MCTP_SERIAL_VERSION; + buf[2] = dev->txlen; + + if (!dev->txpos) + dev->txfcs = crc_ccitt(0, buf + 1, 2); + + txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txpos += txlen; + if (dev->txpos == 3) { + dev->txstate = STATE_DATA; + dev->txpos = 0; + } + } + break; + + case STATE_ESCAPE: + buf[0] = dev->txbuf[dev->txpos] & ~0x20; + txlen = write_chunk(dev, buf, 1); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txpos += txlen; + if (dev->txpos == dev->txlen) { + dev->txstate = STATE_TRAILER; + dev->txpos = 0; + } + } + + break; + + case STATE_DATA: + len = next_chunk_len(dev); + if (len) { + c = dev->txbuf[dev->txpos]; + if (len == 1 && needs_escape(c)) { + buf[0] = BYTE_ESC; + buf[1] = c & ~0x20; + dev->txfcs = crc_ccitt_byte(dev->txfcs, c); + txlen = write_chunk(dev, buf, 2); + if (txlen == 2) + dev->txpos++; + else if (txlen == 1) + dev->txstate = STATE_ESCAPE; + else + dev->txstate = STATE_ERR; + } else { + txlen = write_chunk(dev, + dev->txbuf + dev->txpos, + len); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txfcs = crc_ccitt(dev->txfcs, + dev->txbuf + + dev->txpos, + txlen); + dev->txpos += txlen; + } + } + if (dev->txstate == STATE_DATA && + dev->txpos == dev->txlen) { + dev->txstate = STATE_TRAILER; + dev->txpos = 0; + } + break; + } + dev->txstate = STATE_TRAILER; + dev->txpos = 0; + fallthrough; + + case STATE_TRAILER: + buf[0] = dev->txfcs >> 8; + buf[1] = dev->txfcs & 0xff; + buf[2] = BYTE_FRAME; + txlen = write_chunk(dev, buf + dev->txpos, 3 - dev->txpos); + if (txlen <= 0) { + dev->txstate = STATE_ERR; + } else { + dev->txpos += txlen; + if (dev->txpos == 3) { + dev->txstate = STATE_DONE; + dev->txpos = 0; + } + } + break; + default: + netdev_err_once(dev->netdev, "invalid tx state %d\n", + dev->txstate); + } + + if (dev->txstate == STATE_DONE) { + dev->netdev->stats.tx_packets++; + dev->netdev->stats.tx_bytes += dev->txlen; + dev->txlen = 0; + dev->txpos = 0; + clear_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags); + dev->txstate = STATE_IDLE; + spin_unlock_irqrestore(&dev->lock, flags); + + netif_wake_queue(dev->netdev); + } else { + spin_unlock_irqrestore(&dev->lock, flags); + } +} + +static netdev_tx_t mctp_serial_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct mctp_serial *dev = netdev_priv(ndev); + unsigned long flags; + + WARN_ON(dev->txstate != STATE_IDLE); + + if (skb->len > MCTP_SERIAL_MTU) { + dev->netdev->stats.tx_dropped++; + goto out; + } + + spin_lock_irqsave(&dev->lock, flags); + netif_stop_queue(dev->netdev); + skb_copy_bits(skb, 0, dev->txbuf, skb->len); + dev->txpos = 0; + dev->txlen = skb->len; + dev->txstate = STATE_START; + spin_unlock_irqrestore(&dev->lock, flags); + + set_bit(TTY_DO_WRITE_WAKEUP, &dev->tty->flags); + 
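For reference, the byte stream produced piecewise by the tx state machine above can be generated in a single pass. The following is an editorial sketch, not part of the patch: standalone C99, using a bitwise equivalent of the kernel's crc_ccitt_byte() (reflected polynomial 0x8408) and the same & ~0x20 escape transform as the driver.

#include <stddef.h>
#include <stdint.h>

#define EX_BYTE_FRAME 0x7e
#define EX_BYTE_ESC   0x7d

/* Bitwise equivalent of the kernel's crc_ccitt_byte() (poly 0x8408). */
static uint16_t ex_crc_ccitt_byte(uint16_t crc, uint8_t c)
{
	int i;

	crc ^= c;
	for (i = 0; i < 8; i++)
		crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
	return crc;
}

/* Build one DSP0253 frame: 0x7e, version, len, escaped payload, FCS, 0x7e.
 * Returns the encoded length, or 0 if "out" is too small (worst case is
 * 2 * len + 6 bytes, when every payload byte needs escaping). */
static size_t ex_encode_frame(const uint8_t *data, uint8_t len,
			      uint8_t *out, size_t outsz)
{
	size_t pos = 0;
	uint16_t fcs;
	int i;

	if (outsz < (size_t)len * 2 + 6)
		return 0;

	out[pos++] = EX_BYTE_FRAME;
	out[pos++] = 0x01;			/* MCTP_SERIAL_VERSION */
	out[pos++] = len;
	fcs = ex_crc_ccitt_byte(0, 0x01);	/* FCS covers version + len */
	fcs = ex_crc_ccitt_byte(fcs, len);

	for (i = 0; i < len; i++) {
		uint8_t c = data[i];

		fcs = ex_crc_ccitt_byte(fcs, c); /* ... and unescaped data */
		if (c == EX_BYTE_FRAME || c == EX_BYTE_ESC) {
			out[pos++] = EX_BYTE_ESC;
			c &= ~0x20;		/* same transform as tx */
		}
		out[pos++] = c;
	}

	out[pos++] = fcs >> 8;		/* trailer: FCS, then closing flag */
	out[pos++] = fcs & 0xff;
	out[pos++] = EX_BYTE_FRAME;
	return pos;
}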
schedule_work(&dev->tx_work); + +out: + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void mctp_serial_tty_write_wakeup(struct tty_struct *tty) +{ + struct mctp_serial *dev = tty->disc_data; + + schedule_work(&dev->tx_work); +} + +static void mctp_serial_rx(struct mctp_serial *dev) +{ + struct mctp_skb_cb *cb; + struct sk_buff *skb; + + if (dev->rxfcs != dev->rxfcs_rcvd) { + dev->netdev->stats.rx_dropped++; + dev->netdev->stats.rx_crc_errors++; + return; + } + + skb = netdev_alloc_skb(dev->netdev, dev->rxlen); + if (!skb) { + dev->netdev->stats.rx_dropped++; + return; + } + + skb->protocol = htons(ETH_P_MCTP); + skb_put_data(skb, dev->rxbuf, dev->rxlen); + skb_reset_network_header(skb); + + cb = __mctp_cb(skb); + cb->halen = 0; + + netif_rx_ni(skb); + dev->netdev->stats.rx_packets++; + dev->netdev->stats.rx_bytes += dev->rxlen; +} + +static void mctp_serial_push_header(struct mctp_serial *dev, unsigned char c) +{ + switch (dev->rxpos) { + case 0: + if (c == BYTE_FRAME) + dev->rxpos++; + else + dev->rxstate = STATE_ERR; + break; + case 1: + if (c == MCTP_SERIAL_VERSION) { + dev->rxpos++; + dev->rxfcs = crc_ccitt_byte(0, c); + } else { + dev->rxstate = STATE_ERR; + } + break; + case 2: + if (c > MCTP_SERIAL_FRAME_MTU) { + dev->rxstate = STATE_ERR; + } else { + dev->rxlen = c; + dev->rxpos = 0; + dev->rxstate = STATE_DATA; + dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c); + } + break; + } +} + +static void mctp_serial_push_trailer(struct mctp_serial *dev, unsigned char c) +{ + switch (dev->rxpos) { + case 0: + dev->rxfcs_rcvd = c << 8; + dev->rxpos++; + break; + case 1: + dev->rxfcs_rcvd |= c; + dev->rxpos++; + break; + case 2: + if (c != BYTE_FRAME) { + dev->rxstate = STATE_ERR; + } else { + mctp_serial_rx(dev); + dev->rxlen = 0; + dev->rxpos = 0; + dev->rxstate = STATE_IDLE; + } + break; + } +} + +static void mctp_serial_push(struct mctp_serial *dev, unsigned char c) +{ + switch (dev->rxstate) { + case STATE_IDLE: + dev->rxstate = STATE_HEADER; + fallthrough; + case STATE_HEADER: + mctp_serial_push_header(dev, c); + break; + + case STATE_ESCAPE: + c |= 0x20; + fallthrough; + case STATE_DATA: + if (dev->rxstate != STATE_ESCAPE && c == BYTE_ESC) { + dev->rxstate = STATE_ESCAPE; + } else { + dev->rxfcs = crc_ccitt_byte(dev->rxfcs, c); + dev->rxbuf[dev->rxpos] = c; + dev->rxpos++; + dev->rxstate = STATE_DATA; + if (dev->rxpos == dev->rxlen) { + dev->rxpos = 0; + dev->rxstate = STATE_TRAILER; + } + } + break; + + case STATE_TRAILER: + mctp_serial_push_trailer(dev, c); + break; + + case STATE_ERR: + if (c == BYTE_FRAME) + dev->rxstate = STATE_IDLE; + break; + + default: + netdev_err_once(dev->netdev, "invalid rx state %d\n", + dev->rxstate); + } +} + +static void mctp_serial_tty_receive_buf(struct tty_struct *tty, + const unsigned char *c, + const char *f, int len) +{ + struct mctp_serial *dev = tty->disc_data; + int i; + + if (!netif_running(dev->netdev)) + return; + + /* we don't (currently) use the flag bytes, just data. 
*/ + for (i = 0; i < len; i++) + mctp_serial_push(dev, c[i]); +} + +static const struct net_device_ops mctp_serial_netdev_ops = { + .ndo_start_xmit = mctp_serial_tx, +}; + +static void mctp_serial_setup(struct net_device *ndev) +{ + ndev->type = ARPHRD_MCTP; + + /* we limit at the fixed MTU, which is also the MCTP-standard + * baseline MTU, so is also our minimum + */ + ndev->mtu = MCTP_SERIAL_MTU; + ndev->max_mtu = MCTP_SERIAL_MTU; + ndev->min_mtu = MCTP_SERIAL_MTU; + + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + ndev->flags = IFF_NOARP; + ndev->netdev_ops = &mctp_serial_netdev_ops; + ndev->needs_free_netdev = true; +} + +static int mctp_serial_open(struct tty_struct *tty) +{ + struct mctp_serial *dev; + struct net_device *ndev; + char name[32]; + int idx, rc; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!tty->ops->write) + return -EOPNOTSUPP; + + idx = ida_alloc(&mctp_serial_ida, GFP_KERNEL); + if (idx < 0) + return idx; + + snprintf(name, sizeof(name), "mctpserial%d", idx); + ndev = alloc_netdev(sizeof(*dev), name, NET_NAME_ENUM, + mctp_serial_setup); + if (!ndev) { + rc = -ENOMEM; + goto free_ida; + } + + dev = netdev_priv(ndev); + dev->idx = idx; + dev->tty = tty; + dev->netdev = ndev; + dev->txstate = STATE_IDLE; + dev->rxstate = STATE_IDLE; + spin_lock_init(&dev->lock); + INIT_WORK(&dev->tx_work, mctp_serial_tx_work); + + rc = register_netdev(ndev); + if (rc) + goto free_netdev; + + tty->receive_room = 64 * 1024; + tty->disc_data = dev; + + return 0; + +free_netdev: + free_netdev(ndev); + +free_ida: + ida_free(&mctp_serial_ida, idx); + return rc; +} + +static void mctp_serial_close(struct tty_struct *tty) +{ + struct mctp_serial *dev = tty->disc_data; + int idx = dev->idx; + + unregister_netdev(dev->netdev); + cancel_work_sync(&dev->tx_work); + ida_free(&mctp_serial_ida, idx); +} + +static struct tty_ldisc_ops mctp_ldisc = { + .owner = THIS_MODULE, + .num = N_MCTP, + .name = "mctp", + .open = mctp_serial_open, + .close = mctp_serial_close, + .receive_buf = mctp_serial_tty_receive_buf, + .write_wakeup = mctp_serial_tty_write_wakeup, +}; + +static int __init mctp_serial_init(void) +{ + return tty_register_ldisc(&mctp_ldisc); +} + +static void __exit mctp_serial_exit(void) +{ + tty_unregister_ldisc(&mctp_ldisc); +} + +module_init(mctp_serial_init); +module_exit(mctp_serial_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>"); +MODULE_DESCRIPTION("MCTP Serial transport"); diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig index 6da1fcb25847..bfa16826a6e1 100644 --- a/drivers/net/mdio/Kconfig +++ b/drivers/net/mdio/Kconfig @@ -141,7 +141,7 @@ config MDIO_MVUSB config MDIO_MSCC_MIIM tristate "Microsemi MIIM interface support" - depends on HAS_IOMEM + depends on HAS_IOMEM && REGMAP_MMIO select MDIO_DEVRES help This driver supports the MIIM (MDIO) interface found in the network diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index cad820568f75..966c3b4ad59d 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + !(ctrl & ASPEED_MDIO_CTRL_FIRE), + ASPEED_MDIO_INTERVAL_US, + ASPEED_MDIO_TIMEOUT_US); + if (rc < 0) + return rc; + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, 
ASPEED_MDIO_INTERVAL_US, diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index bd1aea2d5a26..37e0d8b6da07 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -127,7 +127,7 @@ ipq8064_mdio_probe(struct platform_device *pdev) if (of_address_to_resource(np, 0, &res)) return -ENOMEM; - base = ioremap(res.start, resource_size(&res)); + base = devm_ioremap(&pdev->dev, res.start, resource_size(&res)); if (!base) return -ENOMEM; diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 17f98f609ec8..7d2abaf2b2c9 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -10,10 +10,12 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> +#include <linux/mdio/mdio-mscc-miim.h> #include <linux/module.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #define MSCC_MIIM_REG_STATUS 0x0 #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) @@ -35,37 +37,52 @@ #define MSCC_PHY_REG_PHY_STATUS 0x4 struct mscc_miim_dev { - void __iomem *regs; - void __iomem *phy_regs; + struct regmap *regs; + int mii_status_offset; + struct regmap *phy_regs; + int phy_reset_offset; }; /* When high resolution timers aren't built-in: we can't use usleep_range() as * we would sleep way too long. Use udelay() instead. */ -#define mscc_readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -({ \ - if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) \ - readl_poll_timeout_atomic(addr, val, cond, delay_us, \ - timeout_us); \ - readl_poll_timeout(addr, val, cond, delay_us, timeout_us); \ +#define mscc_readx_poll_timeout(op, addr, val, cond, delay_us, timeout_us)\ +({ \ + if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) \ + readx_poll_timeout_atomic(op, addr, val, cond, delay_us, \ + timeout_us); \ + readx_poll_timeout(op, addr, val, cond, delay_us, timeout_us); \ }) -static int mscc_miim_wait_ready(struct mii_bus *bus) +static int mscc_miim_status(struct mii_bus *bus) { struct mscc_miim_dev *miim = bus->priv; + int val, ret; + + ret = regmap_read(miim->regs, + MSCC_MIIM_REG_STATUS + miim->mii_status_offset, &val); + if (ret < 0) { + WARN_ONCE(1, "mscc miim status read error %d\n", ret); + return ret; + } + + return val; +} + +static int mscc_miim_wait_ready(struct mii_bus *bus) +{ u32 val; - return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val, + return mscc_readx_poll_timeout(mscc_miim_status, bus, val, !(val & MSCC_MIIM_STATUS_STAT_BUSY), 50, 10000); } static int mscc_miim_wait_pending(struct mii_bus *bus) { - struct mscc_miim_dev *miim = bus->priv; u32 val; - return mscc_readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val, + return mscc_readx_poll_timeout(mscc_miim_status, bus, val, !(val & MSCC_MIIM_STATUS_STAT_PENDING), 50, 10000); } @@ -80,15 +97,29 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) if (ret) goto out; - writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | - (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ, - miim->regs + MSCC_MIIM_REG_CMD); + ret = regmap_write(miim->regs, + MSCC_MIIM_REG_CMD + miim->mii_status_offset, + MSCC_MIIM_CMD_VLD | + (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | + MSCC_MIIM_CMD_OPR_READ); + + if (ret < 0) { + WARN_ONCE(1, "mscc miim write cmd reg error %d\n", ret); + goto out; + } ret = mscc_miim_wait_ready(bus); if (ret) goto out; - val = readl(miim->regs + MSCC_MIIM_REG_DATA); + ret = 
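An editorial note on the readx_poll_timeout() contract that mscc_readx_poll_timeout() above relies on, as rough pseudo-code; the real macro lives in <linux/iopoll.h>:

/*
 * Roughly:
 *
 *	timeout = now() + timeout_us;
 *	for (;;) {
 *		(val) = op(addr);
 *		if (cond)
 *			break;
 *		if (past(timeout)) {
 *			(val) = op(addr);	(one final sample)
 *			break;
 *		}
 *		usleep_range(delay_us / 4 + 1, delay_us);
 *	}
 *	return (cond) ? 0 : -ETIMEDOUT;
 *
 * The _atomic variant busy-waits with udelay() instead of sleeping,
 * which is why the wrapper selects it when high-resolution timers are
 * not available and usleep_range() would oversleep.
 */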
regmap_read(miim->regs, + MSCC_MIIM_REG_DATA + miim->mii_status_offset, &val); + if (ret < 0) { + WARN_ONCE(1, "mscc miim read data reg error %d\n", ret); + goto out; + } + if (val & MSCC_MIIM_DATA_ERROR) { ret = -EIO; goto out; @@ -109,12 +140,16 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id, if (ret < 0) goto out; - writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | - (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | - (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | - MSCC_MIIM_CMD_OPR_WRITE, - miim->regs + MSCC_MIIM_REG_CMD); + ret = regmap_write(miim->regs, + MSCC_MIIM_REG_CMD + miim->mii_status_offset, + MSCC_MIIM_CMD_VLD | + (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | + (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | + MSCC_MIIM_CMD_OPR_WRITE); + if (ret < 0) + WARN_ONCE(1, "mscc miim write error %d\n", ret); out: return ret; } @@ -122,51 +157,116 @@ out: static int mscc_miim_reset(struct mii_bus *bus) { struct mscc_miim_dev *miim = bus->priv; + int offset = miim->phy_reset_offset; + int ret; if (miim->phy_regs) { - writel(0, miim->phy_regs + MSCC_PHY_REG_PHY_CFG); - writel(0x1ff, miim->phy_regs + MSCC_PHY_REG_PHY_CFG); + ret = regmap_write(miim->phy_regs, + MSCC_PHY_REG_PHY_CFG + offset, 0); + if (ret < 0) { + WARN_ONCE(1, "mscc reset set error %d\n", ret); + return ret; + } + + ret = regmap_write(miim->phy_regs, + MSCC_PHY_REG_PHY_CFG + offset, 0x1ff); + if (ret < 0) { + WARN_ONCE(1, "mscc reset clear error %d\n", ret); + return ret; + } + mdelay(500); } return 0; } -static int mscc_miim_probe(struct platform_device *pdev) +static const struct regmap_config mscc_miim_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name, + struct regmap *mii_regmap, int status_offset) { - struct mscc_miim_dev *dev; - struct resource *res; + struct mscc_miim_dev *miim; struct mii_bus *bus; - int ret; - bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev)); + bus = devm_mdiobus_alloc_size(dev, sizeof(*miim)); if (!bus) return -ENOMEM; - bus->name = "mscc_miim"; + bus->name = name; bus->read = mscc_miim_read; bus->write = mscc_miim_write; bus->reset = mscc_miim_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); - bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev)); + bus->parent = dev; + + miim = bus->priv; + + *pbus = bus; + + miim->regs = mii_regmap; + miim->mii_status_offset = status_offset; + + *pbus = bus; + + return 0; +} +EXPORT_SYMBOL(mscc_miim_setup); + +static int mscc_miim_probe(struct platform_device *pdev) +{ + struct regmap *mii_regmap, *phy_regmap = NULL; + void __iomem *regs, *phy_regs; + struct mscc_miim_dev *miim; + struct resource *res; + struct mii_bus *bus; + int ret; - dev = bus->priv; - dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); - if (IS_ERR(dev->regs)) { + regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(regs)) { dev_err(&pdev->dev, "Unable to map MIIM registers\n"); - return PTR_ERR(dev->regs); + return PTR_ERR(regs); + } + + mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs, + &mscc_miim_regmap_config); + + if (IS_ERR(mii_regmap)) { + dev_err(&pdev->dev, "Unable to create MIIM regmap\n"); + return PTR_ERR(mii_regmap); } /* This resource is optional */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { - dev->phy_regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dev->phy_regs)) { + phy_regs = 
devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(phy_regs)) { dev_err(&pdev->dev, "Unable to map internal phy registers\n"); - return PTR_ERR(dev->phy_regs); + return PTR_ERR(phy_regs); } + + phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs, + &mscc_miim_regmap_config); + if (IS_ERR(phy_regmap)) { + dev_err(&pdev->dev, "Unable to create phy register regmap\n"); + return PTR_ERR(phy_regmap); + } + } + + ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to setup the MDIO bus\n"); + return ret; } + miim = bus->priv; + miim->phy_regs = phy_regmap; + miim->phy_reset_offset = 0; + ret = of_mdiobus_register(bus, pdev->dev.of_node); if (ret < 0) { dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ccecba908ded..ab8cd5551020 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -721,7 +721,7 @@ restart: __netpoll_cleanup(&nt->np); spin_lock_irqsave(&target_list_lock, flags); - dev_put(nt->np.dev); + dev_put_track(nt->np.dev, &nt->np.dev_tracker); nt->np.dev = NULL; nt->enabled = false; stopped = true; diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 90aafb56f140..a43820212932 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -514,6 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap) goto err_free; key = nmap->entry[i].key; *key = i; + memset(nmap->entry[i].value, 0, offmap->map.value_size); } } diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 54345c096a16..08d7b465a0de 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -1622,7 +1622,7 @@ err_params_unregister: devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); err_dl_unregister: - devlink_resources_unregister(devlink, NULL); + devlink_resources_unregister(devlink); err_vfc_free: kfree(nsim_dev->vfconfigs); err_devlink_free: @@ -1668,7 +1668,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev) nsim_dev_debugfs_exit(nsim_dev); devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); - devlink_resources_unregister(devlink, NULL); + devlink_resources_unregister(devlink); kfree(nsim_dev->vfconfigs); devlink_free(devlink); dev_set_drvdata(&nsim_bus_dev->dev, NULL); diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index 0ab6a40be611..ffd9f84b6644 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -65,7 +65,9 @@ static int nsim_set_coalesce(struct net_device *dev, } static void nsim_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct netdevsim *ns = netdev_priv(dev); @@ -73,11 +75,16 @@ static void nsim_get_ringparam(struct net_device *dev, } static int nsim_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct netdevsim *ns = netdev_priv(dev); - memcpy(&ns->ethtool.ring, ring, sizeof(ns->ethtool.ring)); + ns->ethtool.ring.rx_pending = ring->rx_pending; + ns->ethtool.ring.rx_jumbo_pending = ring->rx_jumbo_pending; + ns->ethtool.ring.rx_mini_pending = ring->rx_mini_pending; + ns->ethtool.ring.tx_pending = 
ring->tx_pending; return 0; } diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c index af36cd647bf5..fd3445374955 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -22,6 +22,11 @@ #define IF_MODE_SPEED_MSK GENMASK(3, 2) #define IF_MODE_HALF_DUPLEX BIT(4) +struct lynx_pcs { + struct phylink_pcs pcs; + struct mdio_device *mdio; +}; + enum sgmii_speed { SGMII_SPEED_10 = 0, SGMII_SPEED_100 = 1, @@ -30,6 +35,15 @@ enum sgmii_speed { }; #define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs) +#define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs) + +struct mdio_device *lynx_get_mdio_device(struct phylink_pcs *pcs) +{ + struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); + + return lynx->mdio; +} +EXPORT_SYMBOL(lynx_get_mdio_device); static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs, struct phylink_link_state *state) @@ -329,25 +343,27 @@ static const struct phylink_pcs_ops lynx_pcs_phylink_ops = { .pcs_link_up = lynx_pcs_link_up, }; -struct lynx_pcs *lynx_pcs_create(struct mdio_device *mdio) +struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio) { - struct lynx_pcs *lynx_pcs; + struct lynx_pcs *lynx; - lynx_pcs = kzalloc(sizeof(*lynx_pcs), GFP_KERNEL); - if (!lynx_pcs) + lynx = kzalloc(sizeof(*lynx), GFP_KERNEL); + if (!lynx) return NULL; - lynx_pcs->mdio = mdio; - lynx_pcs->pcs.ops = &lynx_pcs_phylink_ops; - lynx_pcs->pcs.poll = true; + lynx->mdio = mdio; + lynx->pcs.ops = &lynx_pcs_phylink_ops; + lynx->pcs.poll = true; - return lynx_pcs; + return lynx_to_phylink_pcs(lynx); } EXPORT_SYMBOL(lynx_pcs_create); -void lynx_pcs_destroy(struct lynx_pcs *pcs) +void lynx_pcs_destroy(struct phylink_pcs *pcs) { - kfree(pcs); + struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs); + + kfree(lynx); } EXPORT_SYMBOL(lynx_pcs_destroy); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 705c16675b80..c2d1a85ec559 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1235,9 +1235,6 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) /* reserved for future extensions */ - return -EINVAL; - if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC) return -ERANGE; diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index 7113925606f7..b4ff9c5073a3 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -16,6 +16,7 @@ #include <dt-bindings/net/ti-dp83869.h> #define DP83869_PHY_ID 0x2000a0f1 +#define DP83561_PHY_ID 0x2000a1a4 #define DP83869_DEVADDR 0x1f #define MII_DP83869_PHYCTRL 0x10 @@ -878,34 +879,35 @@ static int dp83869_phy_reset(struct phy_device *phydev) return dp83869_config_init(phydev); } -static struct phy_driver dp83869_driver[] = { - { - PHY_ID_MATCH_MODEL(DP83869_PHY_ID), - .name = "TI DP83869", - - .probe = dp83869_probe, - .config_init = dp83869_config_init, - .soft_reset = dp83869_phy_reset, - - /* IRQ related */ - .config_intr = dp83869_config_intr, - .handle_interrupt = dp83869_handle_interrupt, - .read_status = dp83869_read_status, - .get_tunable = dp83869_get_tunable, - .set_tunable = dp83869_set_tunable, +#define DP83869_PHY_DRIVER(_id, _name) \ +{ \ + PHY_ID_MATCH_MODEL(_id), \ + .name = (_name), \ + .probe = dp83869_probe, \ + .config_init = dp83869_config_init, \ + .soft_reset = dp83869_phy_reset, \ + .config_intr = dp83869_config_intr, \ + .handle_interrupt = dp83869_handle_interrupt, \ + .read_status = 
dp83869_read_status, \ + .get_tunable = dp83869_get_tunable, \ + .set_tunable = dp83869_set_tunable, \ + .get_wol = dp83869_get_wol, \ + .set_wol = dp83869_set_wol, \ + .suspend = genphy_suspend, \ + .resume = genphy_resume, \ +} - .get_wol = dp83869_get_wol, - .set_wol = dp83869_set_wol, +static struct phy_driver dp83869_driver[] = { + DP83869_PHY_DRIVER(DP83869_PHY_ID, "TI DP83869"), + DP83869_PHY_DRIVER(DP83561_PHY_ID, "TI DP83561-SP"), - .suspend = genphy_suspend, - .resume = genphy_resume, - }, }; module_phy_driver(dp83869_driver); static struct mdio_device_id __maybe_unused dp83869_tbl[] = { { PHY_ID_MATCH_MODEL(DP83869_PHY_ID) }, + { PHY_ID_MATCH_MODEL(DP83561_PHY_ID) }, { } }; MODULE_DEVICE_TABLE(mdio, dp83869_tbl); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 4fcfca4e1702..739859c0dfb1 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1225,28 +1225,28 @@ static int m88e1118_config_aneg(struct phy_device *phydev) static int m88e1118_config_init(struct phy_device *phydev) { + u16 leds; int err; - /* Change address */ - err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (err < 0) - return err; - /* Enable 1000 Mbit */ - err = phy_write(phydev, 0x15, 0x1070); + err = phy_write_paged(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1121_PHY_MSCR_REG, 0x1070); if (err < 0) return err; - /* Change address */ - err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE); - if (err < 0) - return err; + if (phy_interface_is_rgmii(phydev)) { + err = m88e1121_config_aneg_rgmii_delays(phydev); + if (err < 0) + return err; + } /* Adjust LED Control */ if (phydev->dev_flags & MARVELL_PHY_M1118_DNS323_LEDS) - err = phy_write(phydev, 0x10, 0x1100); + leds = 0x1100; else - err = phy_write(phydev, 0x10, 0x021e); + leds = 0x021e; + + err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, 0x10, leds); if (err < 0) return err; @@ -1254,7 +1254,7 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; - /* Reset address */ + /* Reset page register */ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) return err; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index c204067f1890..58d602985877 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -176,9 +176,11 @@ static void mdiobus_release(struct device *d) { struct mii_bus *bus = to_mii_bus(d); - BUG_ON(bus->state != MDIOBUS_RELEASED && - /* for compatibility with error handling in drivers */ - bus->state != MDIOBUS_ALLOCATED); + WARN(bus->state != MDIOBUS_RELEASED && + /* for compatibility with error handling in drivers */ + bus->state != MDIOBUS_ALLOCATED, + "%s: not in RELEASED or ALLOCATED state\n", + bus->id); kfree(bus); } @@ -460,6 +462,9 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus, if (addr == mdiodev->addr) { device_set_node(dev, of_fwnode_handle(child)); + /* The refcount on "child" is passed to the mdio + * device. Do _not_ use of_node_put(child) here. 
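/*
 * Background for the refcounting comment above (sketch, not part of the
 * patch): for_each_available_child_of_node() holds a reference on the
 * current child and drops it when advancing, so returning from inside the
 * loop leaves the caller owning exactly one reference, which
 * device_set_node() then hands over to the mdio device.  The pattern in
 * isolation (the "reg" matching below is illustrative):
 */
#include <linux/of.h>

static struct device_node *find_child_by_reg(struct device_node *parent,
					     u32 addr)
{
	struct device_node *child;
	u32 reg;

	for_each_available_child_of_node(parent, child) {
		if (!of_property_read_u32(child, "reg", &reg) && reg == addr)
			return child;	/* keep the reference: no of_node_put() */
	}

	return NULL;
}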
+ */ return; } } @@ -529,8 +534,9 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) bus->parent->of_node->fwnode.flags |= FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD; - BUG_ON(bus->state != MDIOBUS_ALLOCATED && - bus->state != MDIOBUS_UNREGISTERED); + WARN(bus->state != MDIOBUS_ALLOCATED && + bus->state != MDIOBUS_UNREGISTERED, + "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id); bus->owner = owner; bus->dev.parent = bus->parent; @@ -591,7 +597,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device); bus->state = MDIOBUS_REGISTERED; - pr_info("%s: probed\n", bus->name); + dev_dbg(&bus->dev, "probed\n"); return 0; error: @@ -658,7 +664,8 @@ void mdiobus_free(struct mii_bus *bus) return; } - BUG_ON(bus->state != MDIOBUS_UNREGISTERED); + WARN(bus->state != MDIOBUS_UNREGISTERED, + "%s: not in UNREGISTERED state\n", bus->id); bus->state = MDIOBUS_RELEASED; put_device(&bus->dev); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 44a24b99c894..4570cb9535b7 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -66,6 +66,19 @@ #define KSZ8081_LMD_SHORT_INDICATOR BIT(12) #define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0) +/* Lan8814 general Interrupt control/status reg in GPHY specific block. */ +#define LAN8814_INTC 0x18 +#define LAN8814_INTS 0x1B + +#define LAN8814_INT_LINK_DOWN BIT(2) +#define LAN8814_INT_LINK_UP BIT(0) +#define LAN8814_INT_LINK (LAN8814_INT_LINK_UP |\ + LAN8814_INT_LINK_DOWN) + +#define LAN8814_INTR_CTRL_REG 0x34 +#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1) +#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0) + /* PHY Control 1 */ #define MII_KSZPHY_CTRL_1 0x1e #define KSZ8081_CTRL1_MDIX_STAT BIT(4) @@ -1565,6 +1578,14 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev, #define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17 #define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000 +#define LAN8814_QSGMII_SOFT_RESET 0x43 +#define LAN8814_QSGMII_SOFT_RESET_BIT BIT(0) +#define LAN8814_QSGMII_PCS1G_ANEG_CONFIG 0x13 +#define LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA BIT(3) +#define LAN8814_ALIGN_SWAP 0x4a +#define LAN8814_ALIGN_TX_A_B_SWAP 0x1 +#define LAN8814_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0) + #define LAN8804_ALIGN_SWAP 0x4a #define LAN8804_ALIGN_TX_A_B_SWAP 0x1 #define LAN8804_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0) @@ -1601,6 +1622,29 @@ static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr, return 0; } +static int lan8814_config_init(struct phy_device *phydev) +{ + int val; + + /* Reset the PHY */ + val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET); + val |= LAN8814_QSGMII_SOFT_RESET_BIT; + lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val); + + /* Disable ANEG with QSGMII PCS Host side */ + val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG); + val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA; + lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val); + + /* MDI-X setting for swap A,B transmit */ + val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP); + val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK; + val |= LAN8814_ALIGN_TX_A_B_SWAP; + lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val); + + return 0; +} + static int lan8804_config_init(struct phy_device *phydev) { int val; @@ -1620,6 +1664,58 @@ static int lan8804_config_init(struct phy_device *phydev) return 0; } +static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) +{ + int 
irq_status; + + irq_status = phy_read(phydev, LAN8814_INTS); + if (irq_status < 0) + return IRQ_NONE; + + if (!(irq_status & LAN8814_INT_LINK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static int lan8814_ack_interrupt(struct phy_device *phydev) +{ + /* bit[12..0] int status, which is a read and clear register. */ + int rc; + + rc = phy_read(phydev, LAN8814_INTS); + + return (rc < 0) ? rc : 0; +} + +static int lan8814_config_intr(struct phy_device *phydev) +{ + int err; + + lanphy_write_page_reg(phydev, 4, LAN8814_INTR_CTRL_REG, + LAN8814_INTR_CTRL_REG_POLARITY | + LAN8814_INTR_CTRL_REG_INTR_ENABLE); + + /* enable / disable interrupts */ + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = lan8814_ack_interrupt(phydev); + if (err) + return err; + + err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK); + } else { + err = phy_write(phydev, LAN8814_INTC, 0); + if (err) + return err; + + err = lan8814_ack_interrupt(phydev); + } + + return err; +} + static struct phy_driver ksphy_driver[] = { { .phy_id = PHY_ID_KS8737, @@ -1793,6 +1889,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_LAN8814, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip INDY Gigabit Quad PHY", + .config_init = lan8814_config_init, .driver_data = &ksz9021_type, .probe = kszphy_probe, .soft_reset = genphy_soft_reset, @@ -1802,6 +1899,8 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = kszphy_resume, + .config_intr = lan8814_config_intr, + .handle_interrupt = lan8814_handle_interrupt, }, { .phy_id = PHY_ID_LAN8804, .phy_id_mask = MICREL_PHY_ID_MASK, diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index edb951695b13..34f829845d06 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1057,9 +1057,6 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; - if (cfg.flags) - return -EINVAL; - switch (cfg.tx_type) { case HWTSTAMP_TX_ONESTEP_SYNC: one_step = true; diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 91a327f67a42..06fdbae509a7 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -97,6 +97,11 @@ #define VEND1_TX_IPG_LENGTH 0xAFD1 #define COUNTER_EN BIT(15) +#define VEND1_PTP_CONFIG 0x1102 +#define EXT_TRG_EDGE BIT(1) +#define PPS_OUT_POL BIT(2) +#define PPS_OUT_EN BIT(3) + #define VEND1_LTC_LOAD_CTRL 0x1105 #define READ_LTC BIT(2) #define LOAD_LTC BIT(0) @@ -132,6 +137,13 @@ #define VEND1_EGR_RING_DATA_3 0x1151 #define VEND1_EGR_RING_CTRL 0x1154 +#define VEND1_EXT_TRG_TS_DATA_0 0x1121 +#define VEND1_EXT_TRG_TS_DATA_1 0x1122 +#define VEND1_EXT_TRG_TS_DATA_2 0x1123 +#define VEND1_EXT_TRG_TS_DATA_3 0x1124 +#define VEND1_EXT_TRG_TS_DATA_4 0x1125 +#define VEND1_EXT_TRG_TS_CTRL 0x1126 + #define RING_DATA_0_DOMAIN_NUMBER GENMASK(7, 0) #define RING_DATA_0_MSG_TYPE GENMASK(11, 8) #define RING_DATA_0_SEC_4_2 GENMASK(14, 2) @@ -162,6 +174,17 @@ #define VEND1_RX_PIPE_DLY_NS 0x114B #define VEND1_RX_PIPEDLY_SUBNS 0x114C +#define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40 +#define GPIO_FUNC_EN BIT(15) +#define GPIO_FUNC_PTP BIT(6) +#define GPIO_SIGNAL_PTP_TRIGGER 0x01 +#define GPIO_SIGNAL_PPS_OUT 0x12 +#define GPIO_DISABLE 0 +#define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \ + GPIO_SIGNAL_PPS_OUT) +#define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \ + 
GPIO_SIGNAL_PTP_TRIGGER) + #define RGMII_PERIOD_PS 8000U #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360) #define MIN_ID_PS 1644U @@ -199,6 +222,9 @@ struct nxp_c45_phy { int hwts_rx; u32 tx_delay; u32 rx_delay; + struct timespec64 extts_ts; + int extts_index; + bool extts; }; struct nxp_c45_phy_stats { @@ -339,6 +365,21 @@ static bool nxp_c45_match_ts(struct ptp_header *header, header->domain_number == hwts->domain_number; } +static void nxp_c45_get_extts(struct nxp_c45_phy *priv, + struct timespec64 *extts) +{ + extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, + VEND1_EXT_TRG_TS_DATA_0); + extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, + VEND1_EXT_TRG_TS_DATA_1) << 16; + extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, + VEND1_EXT_TRG_TS_DATA_2); + extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, + VEND1_EXT_TRG_TS_DATA_3) << 16; + phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL, + RING_DONE); +} + static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv, struct nxp_c45_hwts *hwts) { @@ -409,6 +450,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp) struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); bool poll_txts = nxp_c45_poll_txts(priv->phydev); struct skb_shared_hwtstamps *shhwtstamps_rx; + struct ptp_clock_event event; struct nxp_c45_hwts hwts; bool reschedule = false; struct timespec64 ts; @@ -439,9 +481,181 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp) netif_rx_ni(skb); } + if (priv->extts) { + nxp_c45_get_extts(priv, &ts); + if (timespec64_compare(&ts, &priv->extts_ts) != 0) { + priv->extts_ts = ts; + event.index = priv->extts_index; + event.type = PTP_CLOCK_EXTTS; + event.timestamp = ns_to_ktime(timespec64_to_ns(&ts)); + ptp_clock_event(priv->ptp_clock, &event); + } + reschedule = true; + } + return reschedule ? 1 : -1; } +static void nxp_c45_gpio_config(struct nxp_c45_phy *priv, + int pin, u16 pin_cfg) +{ + struct phy_device *phydev = priv->phydev; + + phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg); +} + +static int nxp_c45_perout_enable(struct nxp_c45_phy *priv, + struct ptp_perout_request *perout, int on) +{ + struct phy_device *phydev = priv->phydev; + int pin; + + if (perout->flags & ~PTP_PEROUT_PHASE) + return -EOPNOTSUPP; + + pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index); + if (pin < 0) + return pin; + + if (!on) { + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, + PPS_OUT_EN); + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, + PPS_OUT_POL); + + nxp_c45_gpio_config(priv, pin, GPIO_DISABLE); + + return 0; + } + + /* The PPS signal is fixed to 1 second and is always generated when the + * seconds counter is incremented. The start time is not configurable. + * If the clock is adjusted, the PPS signal is automatically readjusted. + */ + if (perout->period.sec != 1 || perout->period.nsec != 0) { + phydev_warn(phydev, "The period can be set only to 1 second."); + return -EINVAL; + } + + if (!(perout->flags & PTP_PEROUT_PHASE)) { + if (perout->start.sec != 0 || perout->start.nsec != 0) { + phydev_warn(phydev, "The start time is not configurable. 
Should be set to 0 seconds and 0 nanoseconds."); + return -EINVAL; + } + } else { + if (perout->phase.nsec != 0 && + perout->phase.nsec != (NSEC_PER_SEC >> 1)) { + phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds."); + return -EINVAL; + } + + if (perout->phase.nsec == 0) + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PTP_CONFIG, PPS_OUT_POL); + else + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PTP_CONFIG, PPS_OUT_POL); + } + + nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG); + + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN); + + return 0; +} + +static int nxp_c45_extts_enable(struct nxp_c45_phy *priv, + struct ptp_extts_request *extts, int on) +{ + int pin; + + if (extts->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Sampling on both edges is not supported */ + if ((extts->flags & PTP_RISING_EDGE) && + (extts->flags & PTP_FALLING_EDGE)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index); + if (pin < 0) + return pin; + + if (!on) { + nxp_c45_gpio_config(priv, pin, GPIO_DISABLE); + priv->extts = false; + + return 0; + } + + if (extts->flags & PTP_RISING_EDGE) + phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1, + VEND1_PTP_CONFIG, EXT_TRG_EDGE); + + if (extts->flags & PTP_FALLING_EDGE) + phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1, + VEND1_PTP_CONFIG, EXT_TRG_EDGE); + + nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG); + priv->extts = true; + priv->extts_index = extts->index; + ptp_schedule_worker(priv->ptp_clock, 0); + + return 0; +} + +static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *req, int on) +{ + struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); + + switch (req->type) { + case PTP_CLK_REQ_EXTTS: + return nxp_c45_extts_enable(priv, &req->extts, on); + case PTP_CLK_REQ_PEROUT: + return nxp_c45_perout_enable(priv, &req->perout, on); + default: + return -EOPNOTSUPP; + } +} + +static struct ptp_pin_desc nxp_c45_ptp_pins[] = { + { "nxp_c45_gpio0", 0, PTP_PF_NONE}, + { "nxp_c45_gpio1", 1, PTP_PF_NONE}, + { "nxp_c45_gpio2", 2, PTP_PF_NONE}, + { "nxp_c45_gpio3", 3, PTP_PF_NONE}, + { "nxp_c45_gpio4", 4, PTP_PF_NONE}, + { "nxp_c45_gpio5", 5, PTP_PF_NONE}, + { "nxp_c45_gpio6", 6, PTP_PF_NONE}, + { "nxp_c45_gpio7", 7, PTP_PF_NONE}, + { "nxp_c45_gpio8", 8, PTP_PF_NONE}, + { "nxp_c45_gpio9", 9, PTP_PF_NONE}, + { "nxp_c45_gpio10", 10, PTP_PF_NONE}, + { "nxp_c45_gpio11", 11, PTP_PF_NONE}, +}; + +static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins)) + return -EINVAL; + + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + case PTP_PF_EXTTS: + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv) { priv->caps = (struct ptp_clock_info) { @@ -452,7 +666,13 @@ static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv) .adjtime = nxp_c45_ptp_adjtime, .gettimex64 = nxp_c45_ptp_gettimex64, .settime64 = nxp_c45_ptp_settime64, + .enable = nxp_c45_ptp_enable, + .verify = nxp_c45_ptp_verify_pin, .do_aux_work = nxp_c45_do_aux_work, + .pin_config = nxp_c45_ptp_pins, + .n_pins = ARRAY_SIZE(nxp_c45_ptp_pins), + .n_ext_ts = 1, + .n_per_out = 1, }; priv->ptp_clock = ptp_clock_register(&priv->caps, diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 
2870c33b8975..271fc01f7f7f 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -162,11 +162,11 @@ static const struct phy_setting settings[] = { PHY_SETTING( 2500, FULL, 2500baseT_Full ), PHY_SETTING( 2500, FULL, 2500baseX_Full ), /* 1G */ - PHY_SETTING( 1000, FULL, 1000baseKX_Full ), PHY_SETTING( 1000, FULL, 1000baseT_Full ), PHY_SETTING( 1000, HALF, 1000baseT_Half ), PHY_SETTING( 1000, FULL, 1000baseT1_Full ), PHY_SETTING( 1000, FULL, 1000baseX_Full ), + PHY_SETTING( 1000, FULL, 1000baseKX_Full ), /* 100M */ PHY_SETTING( 100, FULL, 100baseT_Full ), PHY_SETTING( 100, FULL, 100baseT1_Full ), diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 3ad7397b8119..420201858564 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -166,6 +166,307 @@ static const char *phylink_an_mode_str(unsigned int mode) return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; } +static void phylink_caps_to_linkmodes(unsigned long *linkmodes, + unsigned long caps) +{ + if (caps & MAC_SYM_PAUSE) + __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes); + + if (caps & MAC_ASYM_PAUSE) + __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes); + + if (caps & MAC_10HD) + __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes); + + if (caps & MAC_10FD) + __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes); + + if (caps & MAC_100HD) { + __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes); + } + + if (caps & MAC_100FD) { + __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes); + } + + if (caps & MAC_1000HD) + __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes); + + if (caps & MAC_1000FD) { + __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes); + } + + if (caps & MAC_2500FD) { + __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes); + } + + if (caps & MAC_5000FD) + __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes); + + if (caps & MAC_10000FD) { + __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes); + } + + if (caps & MAC_25000FD) { + __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes); + } + + if (caps & MAC_40000FD) { + __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes); + } + + if (caps & MAC_50000FD) { + 
__set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes); + } + + if (caps & MAC_56000FD) { + __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes); + } + + if (caps & MAC_100000FD) { + __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes); + } + + if (caps & MAC_200000FD) { + __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes); + } + + if (caps & MAC_400000FD) { + __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, + linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes); + } +} + +/** + * phylink_get_linkmodes() - get acceptable link modes + * @linkmodes: ethtool linkmode mask (must be already initialised) + * @interface: phy interface mode defined by &typedef phy_interface_t + * @mac_capabilities: bitmask of MAC capabilities + * + * Set all possible pause, speed and duplex linkmodes in 
@linkmodes that + * are supported by the @interface mode and @mac_capabilities. @linkmodes + * must have been initialised previously. + */ +void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface, + unsigned long mac_capabilities) +{ + unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + + switch (interface) { + case PHY_INTERFACE_MODE_USXGMII: + caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD; + fallthrough; + + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_GMII: + caps |= MAC_1000HD | MAC_1000FD; + fallthrough; + + case PHY_INTERFACE_MODE_REVRMII: + case PHY_INTERFACE_MODE_RMII: + case PHY_INTERFACE_MODE_SMII: + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_MII: + caps |= MAC_10HD | MAC_10FD; + fallthrough; + + case PHY_INTERFACE_MODE_100BASEX: + caps |= MAC_100HD | MAC_100FD; + break; + + case PHY_INTERFACE_MODE_TBI: + case PHY_INTERFACE_MODE_MOCA: + case PHY_INTERFACE_MODE_RTBI: + case PHY_INTERFACE_MODE_1000BASEX: + caps |= MAC_1000HD; + fallthrough; + case PHY_INTERFACE_MODE_TRGMII: + caps |= MAC_1000FD; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + caps |= MAC_2500FD; + break; + + case PHY_INTERFACE_MODE_5GBASER: + caps |= MAC_5000FD; + break; + + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_RXAUI: + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_10GKR: + caps |= MAC_10000FD; + break; + + case PHY_INTERFACE_MODE_25GBASER: + caps |= MAC_25000FD; + break; + + case PHY_INTERFACE_MODE_XLGMII: + caps |= MAC_40000FD; + break; + + case PHY_INTERFACE_MODE_INTERNAL: + caps |= ~0; + break; + + case PHY_INTERFACE_MODE_NA: + case PHY_INTERFACE_MODE_MAX: + break; + } + + phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities); +} +EXPORT_SYMBOL_GPL(phylink_get_linkmodes); + +/** + * phylink_generic_validate() - generic validate() callback implementation + * @config: a pointer to a &struct phylink_config. + * @supported: ethtool bitmask for supported link modes. + * @state: a pointer to a &struct phylink_link_state. + * + * Generic implementation of the validate() callback that MAC drivers can + * use when they pass the range of supported interfaces and MAC capabilities. + * This makes use of phylink_get_linkmodes(). + */ +void phylink_generic_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + phylink_get_linkmodes(mask, state->interface, config->mac_capabilities); + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); +} +EXPORT_SYMBOL_GPL(phylink_generic_validate); + +static int phylink_validate_mac_and_pcs(struct phylink *pl, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct phylink_pcs *pcs; + int ret; + + /* Get the PCS for this interface mode */ + if (pl->mac_ops->mac_select_pcs) { + pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); + if (IS_ERR(pcs)) + return PTR_ERR(pcs); + } else { + pcs = pl->pcs; + } + + if (pcs) { + /* The PCS, if present, must be setup before phylink_create() + * has been called. If the ops is not initialised, print an + * error and backtrace rather than oopsing the kernel. 
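/*
 * Sketch of how a MAC driver can adopt the new helpers above (the "foo"
 * names and the capability set are illustrative, not from any in-tree
 * driver): fill in mac_capabilities and supported_interfaces, then plug
 * phylink_generic_validate() straight into the ops.
 */
#include <linux/phylink.h>

struct foo_priv {
	struct phylink_config phylink_config;
	struct phylink *phylink;
};

static const struct phylink_mac_ops foo_phylink_mac_ops = {
	.validate = phylink_generic_validate,
	/* .mac_config, .mac_link_up, .mac_link_down, ... */
};

static void foo_phylink_setup(struct foo_priv *priv)
{
	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE |
						MAC_ASYM_PAUSE |
						MAC_10 | MAC_100 |
						MAC_1000FD;
	/* the driver must also declare which interface modes it handles */
	__set_bit(PHY_INTERFACE_MODE_RGMII_ID,
		  priv->phylink_config.supported_interfaces);
}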
+ */ + if (!pcs->ops) { + phylink_err(pl, "interface %s: uninitialised PCS\n", + phy_modes(state->interface)); + dump_stack(); + return -EINVAL; + } + + /* Validate the link parameters with the PCS */ + if (pcs->ops->pcs_validate) { + ret = pcs->ops->pcs_validate(pcs, supported, state); + if (ret < 0 || phylink_is_empty_linkmode(supported)) + return -EINVAL; + + /* Ensure the advertising mask is a subset of the + * supported mask. + */ + linkmode_and(state->advertising, state->advertising, + supported); + } + } + + /* Then validate the link parameters with the MAC */ + pl->mac_ops->validate(pl->config, supported, state); + + return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; +} + static int phylink_validate_any(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { @@ -181,9 +482,10 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported, t = *state; t.interface = intf; - pl->mac_ops->validate(pl->config, s, &t); - linkmode_or(all_s, all_s, s); - linkmode_or(all_adv, all_adv, t.advertising); + if (!phylink_validate_mac_and_pcs(pl, s, &t)) { + linkmode_or(all_s, all_s, s); + linkmode_or(all_adv, all_adv, t.advertising); + } } } @@ -205,9 +507,7 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported, return -EINVAL; } - pl->mac_ops->validate(pl->config, supported, state); - - return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; + return phylink_validate_mac_and_pcs(pl, supported, state); } static int phylink_parse_fixedlink(struct phylink *pl, @@ -489,7 +789,7 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl) phylink_autoneg_inband(pl->cur_link_an_mode)) { if (pl->pcs_ops) pl->pcs_ops->pcs_an_restart(pl->pcs); - else + else if (pl->config->legacy_pre_march2020) pl->mac_ops->mac_an_restart(pl->config); } } @@ -497,10 +797,21 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl) static void phylink_major_config(struct phylink *pl, bool restart, const struct phylink_link_state *state) { + struct phylink_pcs *pcs = NULL; int err; phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); + if (pl->mac_ops->mac_select_pcs) { + pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); + if (IS_ERR(pcs)) { + phylink_err(pl, + "mac_select_pcs unexpectedly failed: %pe\n", + pcs); + return; + } + } + if (pl->mac_ops->mac_prepare) { err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode, state->interface); @@ -511,6 +822,12 @@ static void phylink_major_config(struct phylink *pl, bool restart, } } + /* If we have a new PCS, switch to the new PCS after preparing the MAC + * for the change. 
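/*
 * Sketch of the new mac_select_pcs() hook that phylink_major_config()
 * consults above, continuing the hypothetical foo driver from the earlier
 * sketch (the per-interface split and the sgmii_pcs/xfi_pcs fields are
 * assumptions): return the PCS to use for the given interface, NULL for
 * none, or an ERR_PTR to refuse the mode.
 */
static struct phylink_pcs *foo_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct foo_priv *priv = container_of(config, struct foo_priv,
					     phylink_config);

	switch (interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		return priv->sgmii_pcs;	/* assumed driver-private field */
	case PHY_INTERFACE_MODE_10GBASER:
		return priv->xfi_pcs;	/* assumed driver-private field */
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}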
+ */ + if (pcs) + phylink_set_pcs(pl, pcs); + phylink_mac_config(pl, state); if (pl->pcs_ops) { @@ -550,7 +867,7 @@ static int phylink_change_inband_advert(struct phylink *pl) if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) return 0; - if (!pl->pcs_ops) { + if (!pl->pcs_ops && pl->config->legacy_pre_march2020) { /* Legacy method */ phylink_mac_config(pl, &pl->link_config); phylink_mac_pcs_an_restart(pl); @@ -601,7 +918,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, if (pl->pcs_ops) pl->pcs_ops->pcs_get_state(pl->pcs, state); - else if (pl->mac_ops->mac_pcs_get_state) + else if (pl->mac_ops->mac_pcs_get_state && + pl->config->legacy_pre_march2020) pl->mac_ops->mac_pcs_get_state(pl->config, state); else state->link = 0; @@ -710,6 +1028,7 @@ static void phylink_resolve(struct work_struct *w) struct phylink_link_state link_state; struct net_device *ndev = pl->netdev; bool mac_config = false; + bool retrigger = false; bool cur_link_state; mutex_lock(&pl->state_mutex); @@ -723,6 +1042,7 @@ static void phylink_resolve(struct work_struct *w) link_state.link = false; } else if (pl->mac_link_dropped) { link_state.link = false; + retrigger = true; } else { switch (pl->cur_link_an_mode) { case MLO_AN_PHY: @@ -739,6 +1059,19 @@ static void phylink_resolve(struct work_struct *w) case MLO_AN_INBAND: phylink_mac_pcs_get_state(pl, &link_state); + /* The PCS may have a latching link-fail indicator. + * If the link was up, bring the link down and + * re-trigger the resolve. Otherwise, re-read the + * PCS state to get the current status of the link. + */ + if (!link_state.link) { + if (cur_link_state) + retrigger = true; + else + phylink_mac_pcs_get_state(pl, + &link_state); + } + /* If we have a phy, the "up" state is the union of * both the PHY and the MAC */ @@ -747,6 +1080,15 @@ static void phylink_resolve(struct work_struct *w) /* Only update if the PHY link is up */ if (pl->phydev && pl->phy_state.link) { + /* If the interface has changed, force a + * link down event if the link isn't already + * down, and re-resolve. + */ + if (link_state.interface != + pl->phy_state.interface) { + retrigger = true; + link_state.link = false; + } link_state.interface = pl->phy_state.interface; /* If we have a PHY, we need to update with @@ -771,12 +1113,11 @@ static void phylink_resolve(struct work_struct *w) } phylink_major_config(pl, false, &link_state); pl->link_config.interface = link_state.interface; - } else if (!pl->pcs_ops) { + } else if (!pl->pcs_ops && pl->config->legacy_pre_march2020) { /* The interface remains unchanged, only the speed, * duplex or pause settings have changed. Call the * old mac_config() method to configure the MAC/PCS - * only if we do not have a PCS installed (an - * unconverted user.) + * only if we do not have a legacy MAC driver. 
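/*
 * Sketch of the opt-in the legacy paths above now depend on, again using
 * the hypothetical foo driver: an unconverted MAC driver that still relies
 * on mac_pcs_get_state()/mac_an_restart() must declare itself as
 * pre-March-2020 before phylink_create(), otherwise those callbacks are
 * skipped.
 */
static int foo_phylink_init(struct foo_priv *priv,
			    struct fwnode_handle *fwnode)
{
	struct phylink *pl;

	/* unconverted driver: keep the old mac_config() based behaviour */
	priv->phylink_config.legacy_pre_march2020 = true;

	pl = phylink_create(&priv->phylink_config, fwnode,
			    PHY_INTERFACE_MODE_SGMII, &foo_phylink_mac_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	priv->phylink = pl;
	return 0;
}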
*/ phylink_mac_config(pl, &link_state); } @@ -789,7 +1130,7 @@ static void phylink_resolve(struct work_struct *w) else phylink_link_up(pl, link_state); } - if (!link_state.link && pl->mac_link_dropped) { + if (!link_state.link && retrigger) { pl->mac_link_dropped = false; queue_work(system_power_efficient_wq, &pl->resolve); } @@ -813,6 +1154,12 @@ static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) } } +static void phylink_enable_and_run_resolve(struct phylink *pl, int bit) +{ + clear_bit(bit, &pl->phylink_disable_state); + phylink_run_resolve(pl); +} + static void phylink_fixed_poll(struct timer_list *t) { struct phylink *pl = container_of(t, struct phylink, link_poll); @@ -872,6 +1219,14 @@ struct phylink *phylink_create(struct phylink_config *config, struct phylink *pl; int ret; + /* Validate the supplied configuration */ + if (mac_ops->mac_select_pcs && + phy_interface_empty(config->supported_interfaces)) { + dev_err(config->dev, + "phylink: error: empty supported_interfaces but mac_select_pcs() method present\n"); + return ERR_PTR(-EINVAL); + } + pl = kzalloc(sizeof(*pl), GFP_KERNEL); if (!pl) return ERR_PTR(-ENOMEM); @@ -939,9 +1294,10 @@ EXPORT_SYMBOL_GPL(phylink_create); * @pl: a pointer to a &struct phylink returned from phylink_create() * @pcs: a pointer to the &struct phylink_pcs * - * Bind the MAC PCS to phylink. This may be called after phylink_create(), - * in mac_prepare() or mac_config() methods if it is desired to dynamically - * change the PCS. + * Bind the MAC PCS to phylink. This may be called after phylink_create(). + * If it is desired to dynamically change the PCS, then the preferred method + * is to use mac_select_pcs(), but it may also be called in mac_prepare() + * or mac_config(). * * Please note that there are behavioural changes with the mac_config() * callback if a PCS is present (denoting a newer setup) so removing a PCS @@ -952,6 +1308,14 @@ void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs) { pl->pcs = pcs; pl->pcs_ops = pcs->ops; + + if (!pl->phylink_disable_state && + pl->cfg_link_an_mode == MLO_AN_INBAND) { + if (pl->config->pcs_poll || pcs->poll) + mod_timer(&pl->link_poll, jiffies + HZ); + else + del_timer(&pl->link_poll); + } } EXPORT_SYMBOL_GPL(phylink_set_pcs); @@ -1072,7 +1436,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, mutex_unlock(&phy->lock); phylink_dbg(pl, - "phy: setting supported %*pb advertising %*pb\n", + "phy: %s setting supported %*pb advertising %*pb\n", + phy_modes(interface), __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising); @@ -1190,6 +1555,12 @@ int phylink_fwnode_phy_connect(struct phylink *pl, if (!phy_dev) return -ENODEV; + /* Use PHY device/driver interface */ + if (pl->link_interface == PHY_INTERFACE_MODE_NA) { + pl->link_interface = phy_dev->interface; + pl->link_config.interface = pl->link_interface; + } + ret = phy_attach_direct(pl->netdev, phy_dev, flags, pl->link_interface); if (ret) { @@ -1290,8 +1661,7 @@ void phylink_start(struct phylink *pl) */ phylink_mac_initial_config(pl, true); - clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); - phylink_run_resolve(pl); + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_STOPPED); if (pl->cfg_link_an_mode == MLO_AN_FIXED && pl->link_gpio) { int irq = gpiod_to_irq(pl->link_gpio); @@ -1364,6 +1734,7 @@ EXPORT_SYMBOL_GPL(phylink_stop); * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan * * Handle a network device suspend event. 
There are several cases: + * * - If Wake-on-Lan is not active, we can bring down the link between * the MAC and PHY by calling phylink_stop(). * - If Wake-on-Lan is active, and being handled only by the PHY, we @@ -1431,8 +1802,7 @@ void phylink_resume(struct phylink *pl) phylink_mac_initial_config(pl, true); /* Re-enable and re-resolve the link parameters */ - clear_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state); - phylink_run_resolve(pl); + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_MAC_WOL); } else { phylink_start(pl); } @@ -2361,8 +2731,7 @@ static void phylink_sfp_link_up(void *upstream) ASSERT_RTNL(); - clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); - phylink_run_resolve(pl); + phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK); } /* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII @@ -2562,31 +2931,22 @@ void phylink_decode_usxgmii_word(struct phylink_link_state *state, EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word); /** - * phylink_mii_c22_pcs_get_state() - read the MAC PCS state - * @pcs: a pointer to a &struct mdio_device. + * phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers * @state: a pointer to a &struct phylink_link_state. + * @bmsr: The value of the %MII_BMSR register + * @lpa: The value of the %MII_LPA register * * Helper for MAC PCS supporting the 802.3 clause 22 register set for * clause 37 negotiation and/or SGMII control. * - * Read the MAC PCS state from the MII device configured in @config and - * parse the Clause 37 or Cisco SGMII link partner negotiation word into - * the phylink @state structure. This is suitable to be directly plugged - * into the mac_pcs_get_state() member of the struct phylink_mac_ops - * structure. + * Parse the Clause 37 or Cisco SGMII link partner negotiation word into + * the phylink @state structure. This is suitable to be used for implementing + * the mac_pcs_get_state() member of the struct phylink_mac_ops structure if + * accessing @bmsr and @lpa cannot be done with MDIO directly. */ -void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, - struct phylink_link_state *state) +void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, + u16 bmsr, u16 lpa) { - int bmsr, lpa; - - bmsr = mdiodev_read(pcs, MII_BMSR); - lpa = mdiodev_read(pcs, MII_LPA); - if (bmsr < 0 || lpa < 0) { - state->link = false; - return; - } - state->link = !!(bmsr & BMSR_LSTATUS); state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); /* If there is no link or autonegotiation is disabled, the LP advertisement @@ -2614,28 +2974,54 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, break; } } +EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state); + +/** + * phylink_mii_c22_pcs_get_state() - read the MAC PCS state + * @pcs: a pointer to a &struct mdio_device. + * @state: a pointer to a &struct phylink_link_state. + * + * Helper for MAC PCS supporting the 802.3 clause 22 register set for + * clause 37 negotiation and/or SGMII control. + * + * Read the MAC PCS state from the MII device configured in @config and + * parse the Clause 37 or Cisco SGMII link partner negotiation word into + * the phylink @state structure. This is suitable to be directly plugged + * into the mac_pcs_get_state() member of the struct phylink_mac_ops + * structure. 
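/*
 * Sketch of what the decode/read split above enables (hypothetical PCS;
 * the register offsets are invented): a PCS whose BMSR/LPA images are
 * memory-mapped rather than behind an MDIO bus can now reuse the common
 * clause 37 / SGMII decode logic.
 */
struct foo_mmio_pcs {
	struct phylink_pcs pcs;
	void __iomem *base;
};

static void foo_mmio_pcs_get_state(struct phylink_pcs *pcs,
				   struct phylink_link_state *state)
{
	struct foo_mmio_pcs *mp = container_of(pcs, struct foo_mmio_pcs, pcs);
	u16 bmsr = readl(mp->base + 0x0);	/* assumed BMSR image offset */
	u16 lpa = readl(mp->base + 0x4);	/* assumed LPA image offset */

	phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
}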
+ */ +void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, + struct phylink_link_state *state) +{ + int bmsr, lpa; + + bmsr = mdiodev_read(pcs, MII_BMSR); + lpa = mdiodev_read(pcs, MII_LPA); + if (bmsr < 0 || lpa < 0) { + state->link = false; + return; + } + + phylink_mii_c22_pcs_decode_state(state, bmsr, lpa); +} EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state); /** - * phylink_mii_c22_pcs_set_advertisement() - configure the clause 37 PCS + * phylink_mii_c22_pcs_encode_advertisement() - configure the clause 37 PCS * advertisement - * @pcs: a pointer to a &struct mdio_device. * @interface: the PHY interface mode being configured * @advertising: the ethtool advertisement mask * * Helper for MAC PCS supporting the 802.3 clause 22 register set for * clause 37 negotiation and/or SGMII control. * - * Configure the clause 37 PCS advertisement as specified by @state. This - * does not trigger a renegotiation; phylink will do that via the - * mac_an_restart() method of the struct phylink_mac_ops structure. + * Encode the clause 37 PCS advertisement as specified by @interface and + * @advertising. * - * Returns negative error code on failure to configure the advertisement, - * zero if no change has been made, or one if the advertisement has changed. + * Return: The new value for @adv, or ``-EINVAL`` if it should not be changed. */ -int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, - phy_interface_t interface, - const unsigned long *advertising) +int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface, + const unsigned long *advertising) { u16 adv; @@ -2649,18 +3035,15 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) adv |= ADVERTISE_1000XPSE_ASYM; - - return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, adv); - + return adv; case PHY_INTERFACE_MODE_SGMII: - return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, 0x0001); - + return 0x0001; default: /* Nothing to do for other modes */ - return 0; + return -EINVAL; } } -EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement); +EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_encode_advertisement); /** * phylink_mii_c22_pcs_config() - configure clause 22 PCS @@ -2678,16 +3061,18 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, phy_interface_t interface, const unsigned long *advertising) { - bool changed; + bool changed = 0; u16 bmcr; - int ret; + int ret, adv; - ret = phylink_mii_c22_pcs_set_advertisement(pcs, interface, - advertising); - if (ret < 0) - return ret; - - changed = ret > 0; + adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); + if (adv >= 0) { + ret = mdiobus_modify_changed(pcs->bus, pcs->addr, + MII_ADVERTISE, 0xffff, adv); + if (ret < 0) + return ret; + changed = ret; + } /* Ensure ISOLATE bit is disabled */ if (mode == MLO_AN_INBAND && @@ -2700,7 +3085,7 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, if (ret < 0) return ret; - return changed ? 
1 : 0; + return changed; } EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 1180a0e2445f..4a365f15533e 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -69,6 +69,8 @@ #define MPHDRLEN 6 /* multilink protocol header length */ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ +#define PPP_PROTO_LEN 2 + /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data @@ -173,6 +175,7 @@ struct channel { spinlock_t downl; /* protects `chan', file.xq dequeue */ struct ppp *ppp; /* ppp unit we're connected to */ struct net *chan_net; /* the net channel belongs to */ + netns_tracker ns_tracker; struct list_head clist; /* link in list of channels per unit */ rwlock_t upl; /* protects `ppp' and 'bridge' */ struct channel __rcu *bridge; /* "bridged" ppp channel */ @@ -497,6 +500,9 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, if (!pf) return -ENXIO; + /* All PPP packets should start with the 2-byte protocol */ + if (count < PPP_PROTO_LEN) + return -EINVAL; ret = -ENOMEM; skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); if (!skb) @@ -1764,7 +1770,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) } ++ppp->stats64.tx_packets; - ppp->stats64.tx_bytes += skb->len - 2; + ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN; switch (proto) { case PPP_IP: @@ -2879,7 +2885,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) pch->ppp = NULL; pch->chan = chan; - pch->chan_net = get_net(net); + pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL); chan->ppp = pch; init_ppp_file(&pch->file, CHANNEL); pch->file.hdrlen = chan->hdrlen; @@ -3519,7 +3525,7 @@ ppp_disconnect_channel(struct channel *pch) */ static void ppp_destroy_channel(struct channel *pch) { - put_net(pch->chan_net); + put_net_track(pch->chan_net, &pch->ns_tracker); pch->chan_net = NULL; atomic_dec(&channel_count); diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h index c420e5948522..3d7f88b330c1 100644 --- a/drivers/net/slip/slip.h +++ b/drivers/net/slip/slip.h @@ -40,6 +40,8 @@ insmod -oslip_maxdev=nnn */ #define SL_MTU 296 /* 296; I am used to 600- FvK */ +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP protocol characters. 
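/*
 * For reference, the classic SLIP byte stuffing (RFC 1055) that these
 * characters implement; a sketch mirroring the driver's slip_esc() logic
 * rather than a copy of it (ESC_END is 0334 and ESC_ESC is 0335 in the
 * full header):
 */
static int slip_stuff(const unsigned char *s, unsigned char *d, int len)
{
	unsigned char *start = d;

	*d++ = END;		/* flush any line noise at the receiver */
	while (len-- > 0) {
		switch (*s) {
		case END:
			*d++ = ESC;
			*d++ = ESC_END;
			break;
		case ESC:
			*d++ = ESC;
			*d++ = ESC_ESC;
			break;
		default:
			*d++ = *s;
		}
		s++;
	}
	*d++ = END;		/* mark end of frame */

	return d - start;
}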
*/ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fecc9a1d293a..fed85447701a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -209,6 +209,9 @@ struct tun_struct { struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; + /* init args */ + struct file *file; + struct ifreq *ifr; }; struct veth { @@ -216,6 +219,9 @@ struct veth { __be16 h_vlan_TCI; }; +static void tun_flow_init(struct tun_struct *tun); +static void tun_flow_uninit(struct tun_struct *tun); + static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@ -953,6 +959,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) static const struct ethtool_ops tun_ethtool_ops; +static int tun_net_init(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct ifreq *ifr = tun->ifr; + int err; + + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + spin_lock_init(&tun->lock); + + err = security_tun_dev_alloc_security(&tun->security); + if (err < 0) { + free_percpu(dev->tstats); + return err; + } + + tun_flow_init(tun); + + dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | + TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + dev->features = dev->hw_features | NETIF_F_LLTX; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + tun->flags = (tun->flags & ~TUN_FEATURES) | + (ifr->ifr_flags & TUN_FEATURES); + + INIT_LIST_HEAD(&tun->disabled); + err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, + ifr->ifr_flags & IFF_NAPI_FRAGS, false); + if (err < 0) { + tun_flow_uninit(tun); + security_tun_dev_free_security(tun->security); + free_percpu(dev->tstats); + return err; + } + return 0; +} + /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { @@ -1010,6 +1059,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); int txq = skb->queue_mapping; + struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; @@ -1054,6 +1104,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) if (ptr_ring_produce(&tfile->tx_ring, skb)) goto drop; + /* NETIF_F_LLTX requires to do our own update of trans_start */ + queue = netdev_get_tx_queue(dev, txq); + queue->trans_start = jiffies; + /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); @@ -1164,6 +1218,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) } static const struct net_device_ops tun_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1247,6 +1302,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) } static const struct net_device_ops tap_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1287,7 +1343,7 @@ static void tun_flow_uninit(struct tun_struct *tun) #define MAX_MTU 65535 /* Initialize net device. 
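/*
 * Generic form of the trans_start bookkeeping the tun hunk above adds
 * (sketch, illustrative driver): NETIF_F_LLTX drivers bypass the core's
 * tx queue locking, so the core's netif_trans_update() never runs for
 * them and the tx watchdog would otherwise see a stale timestamp.  Such
 * drivers must stamp the queue themselves on every transmit:
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev,
						       skb->queue_mapping);

	/* ... hand the skb to the device or its ring here ... */

	txq->trans_start = jiffies;	/* manual netif_trans_update() */

	return NETDEV_TX_OK;
}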
*/ -static void tun_net_init(struct net_device *dev) +static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -1546,7 +1602,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, case XDP_PASS: break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); @@ -2201,11 +2257,6 @@ static void tun_free_netdev(struct net_device *dev) BUG_ON(!(list_empty(&tun->disabled))); free_percpu(dev->tstats); - /* We clear tstats so that tun_set_iff() can tell if - * tun_free_netdev() has been called from register_netdevice(). - */ - dev->tstats = NULL; - tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); __tun_set_ebpf(tun, &tun->steering_prog, NULL); @@ -2711,41 +2762,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->tstats) { - err = -ENOMEM; - goto err_free_dev; - } + tun->ifr = ifr; + tun->file = file; - spin_lock_init(&tun->lock); - - err = security_tun_dev_alloc_security(&tun->security); - if (err < 0) - goto err_free_stat; - - tun_net_init(dev); - tun_flow_init(tun); - - dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | - TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; - dev->features = dev->hw_features | NETIF_F_LLTX; - dev->vlan_features = dev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX); - - tun->flags = (tun->flags & ~TUN_FEATURES) | - (ifr->ifr_flags & TUN_FEATURES); - - INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS, false); - if (err < 0) - goto err_free_flow; + tun_net_initialize(dev); err = register_netdevice(tun->dev); - if (err < 0) - goto err_detach; + if (err < 0) { + free_netdev(dev); + return err; + } /* free_netdev() won't check refcnt, to avoid race * with dev_put() we need publish tun after registration. */ @@ -2762,24 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) strcpy(ifr->ifr_name, tun->dev->name); return 0; - -err_detach: - tun_detach_all(dev); - /* We are here because register_netdevice() has failed. - * If register_netdevice() already called tun_free_netdev() - * while dealing with the error, dev->stats has been cleared. 
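The shape of this tun refactor: setup that tun_set_iff() used to do by hand now runs from .ndo_init, which register_netdevice() both invokes and automatically unwinds (via ndo_uninit and the priv destructor) if a later registration step fails, so the err_detach/err_free_flow goto ladder removed below becomes unnecessary. A minimal sketch of the pattern being relied on; the foo_* names are placeholders, not tun's API:

    static int foo_net_init(struct net_device *dev)
    {
            /* Paired with foo_net_uninit(), which register_netdevice()
             * calls for us should any later step fail. */
            dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
            if (!dev->tstats)
                    return -ENOMEM;
            return 0;
    }

    static int foo_create(struct net_device *dev)
    {
            int err = register_netdevice(dev);  /* runs foo_net_init() */

            if (err < 0)
                    free_netdev(dev);           /* no manual unwind needed */
            return err;
    }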
- */ - if (!dev->tstats) - goto err_free_dev; - -err_free_flow: - tun_flow_uninit(tun); - security_tun_dev_free_security(tun->security); -err_free_stat: - free_percpu(dev->tstats); -err_free_dev: - free_netdev(dev); - return err; } static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 42ba4af68090..71682970be58 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -9,6 +9,8 @@ #include "asix.h" +#define AX_HOST_EN_RETRIES 30 + int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data, int in_pm) { @@ -68,7 +70,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm) int i, ret; u8 smsr; - for (i = 0; i < 30; ++i) { + for (i = 0; i < AX_HOST_EN_RETRIES; ++i) { ret = asix_set_sw_mii(dev, in_pm); if (ret == -ENODEV || ret == -ETIMEDOUT) break; @@ -77,13 +79,13 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm) 0, 0, 1, &smsr, in_pm); if (ret == -ENODEV) break; - else if (ret < 0) + else if (ret < sizeof(smsr)) continue; else if (smsr & AX_HOST_EN) break; } - return ret; + return i >= AX_HOST_EN_RETRIES ? -ETIMEDOUT : ret; } static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index ea8aa8c33241..1a627ba4b850 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1377,11 +1377,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf) dev->mii.phy_id = 0x03; dev->mii.supports_gmii = 1; - dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM; + dev->net->features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO; - dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM; + dev->net->hw_features |= dev->net->features; + + netif_set_gso_max_size(dev->net, 16384); /* Enable checksum offload */ *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | @@ -1526,17 +1527,19 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_hdr1, tx_hdr2; int frame_size = dev->maxpacket; - int mss = skb_shinfo(skb)->gso_size; int headroom; void *ptr; tx_hdr1 = skb->len; - tx_hdr2 = mss; + tx_hdr2 = skb_shinfo(skb)->gso_size; /* Set TSO mss */ if (((skb->len + 8) % frame_size) == 0) tx_hdr2 |= 0x80008000; /* Enable padding */ headroom = skb_headroom(skb) - 8; + if ((dev->net->features & NETIF_F_SG) && skb_linearize(skb)) + return NULL; + if ((skb_header_cloned(skb) || headroom < 0) && pskb_expand_head(skb, headroom < 0 ? 
8 : 0, 0, GFP_ATOMIC)) { dev_kfree_skb_any(skb); @@ -1547,6 +1550,8 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) put_unaligned_le32(tx_hdr1, ptr); put_unaligned_le32(tx_hdr2, ptr + 4); + usbnet_set_skb_tx_stats(skb, (skb_shinfo(skb)->gso_segs ?: 1), 0); + return skb; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 24753a4da7e6..e303b522efb5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32); max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); + if (max == 0) + max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ /* some devices set dwNtbOutMaxSize too low for the above default */ min = min(min, max); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f20376c1ef3f..b8e20a3f2b84 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -67,7 +67,7 @@ #define DEFAULT_TSO_CSUM_ENABLE (true) #define DEFAULT_VLAN_FILTER_ENABLE (true) #define DEFAULT_VLAN_RX_OFFLOAD (true) -#define TX_OVERHEAD (8) +#define TX_ALIGNMENT (4) #define RXW_PADDING 2 #define LAN78XX_USB_VENDOR_ID (0x0424) @@ -76,6 +76,8 @@ #define LAN7801_USB_PRODUCT_ID (0x7801) #define LAN78XX_EEPROM_MAGIC (0x78A5) #define LAN78XX_OTP_MAGIC (0x78F3) +#define AT29M2AF_USB_VENDOR_ID (0x07C9) +#define AT29M2AF_USB_PRODUCT_ID (0x0012) #define MII_READ 1 #define MII_WRITE 0 @@ -90,6 +92,41 @@ WAKE_MCAST | WAKE_BCAST | \ WAKE_ARP | WAKE_MAGIC) +#define LAN78XX_NAPI_WEIGHT 64 + +#define TX_URB_NUM 10 +#define TX_SS_URB_NUM TX_URB_NUM +#define TX_HS_URB_NUM TX_URB_NUM +#define TX_FS_URB_NUM TX_URB_NUM + +/* A single URB buffer must be large enough to hold a complete jumbo packet + */ +#define TX_SS_URB_SIZE (32 * 1024) +#define TX_HS_URB_SIZE (16 * 1024) +#define TX_FS_URB_SIZE (10 * 1024) + +#define RX_SS_URB_NUM 30 +#define RX_HS_URB_NUM 10 +#define RX_FS_URB_NUM 10 +#define RX_SS_URB_SIZE TX_SS_URB_SIZE +#define RX_HS_URB_SIZE TX_HS_URB_SIZE +#define RX_FS_URB_SIZE TX_FS_URB_SIZE + +#define SS_BURST_CAP_SIZE RX_SS_URB_SIZE +#define SS_BULK_IN_DELAY 0x2000 +#define HS_BURST_CAP_SIZE RX_HS_URB_SIZE +#define HS_BULK_IN_DELAY 0x2000 +#define FS_BURST_CAP_SIZE RX_FS_URB_SIZE +#define FS_BULK_IN_DELAY 0x2000 + +#define TX_CMD_LEN 8 +#define TX_SKB_MIN_LEN (TX_CMD_LEN + ETH_HLEN) +#define LAN78XX_TSO_SIZE(dev) ((dev)->tx_urb_size - TX_SKB_MIN_LEN) + +#define RX_CMD_LEN 10 +#define RX_SKB_MIN_LEN (RX_CMD_LEN + ETH_HLEN) +#define RX_MAX_FRAME_LEN(mtu) ((mtu) + ETH_HLEN + VLAN_HLEN) + /* USB related defines */ #define BULK_IN_PIPE 1 #define BULK_OUT_PIPE 2 @@ -385,14 +422,22 @@ struct lan78xx_net { struct usb_interface *intf; void *driver_priv; - int rx_qlen; - int tx_qlen; + unsigned int tx_pend_data_len; + size_t n_tx_urbs; + size_t n_rx_urbs; + size_t tx_urb_size; + size_t rx_urb_size; + + struct sk_buff_head rxq_free; struct sk_buff_head rxq; + struct sk_buff_head rxq_done; + struct sk_buff_head rxq_overflow; + struct sk_buff_head txq_free; struct sk_buff_head txq; - struct sk_buff_head done; struct sk_buff_head txq_pend; - struct tasklet_struct bh; + struct napi_struct napi; + struct delayed_work wq; int msg_enable; @@ -404,8 +449,8 @@ struct lan78xx_net { struct mutex phy_mutex; /* for phy access */ unsigned int pipe_in, pipe_out, pipe_intr; - u32 hard_mtu; /* count any extra framing */ - size_t rx_urb_size; /* size for rx 
urbs */ + unsigned int bulk_in_delay; + unsigned int burst_cap; unsigned long flags; @@ -443,6 +488,129 @@ static int msg_level = -1; module_param(msg_level, int, 0); MODULE_PARM_DESC(msg_level, "Override default message level"); +static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool) +{ + if (skb_queue_empty(buf_pool)) + return NULL; + + return skb_dequeue(buf_pool); +} + +static void lan78xx_release_buf(struct sk_buff_head *buf_pool, + struct sk_buff *buf) +{ + buf->data = buf->head; + skb_reset_tail_pointer(buf); + + buf->len = 0; + buf->data_len = 0; + + skb_queue_tail(buf_pool, buf); +} + +static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool) +{ + struct skb_data *entry; + struct sk_buff *buf; + + while (!skb_queue_empty(buf_pool)) { + buf = skb_dequeue(buf_pool); + if (buf) { + entry = (struct skb_data *)buf->cb; + usb_free_urb(entry->urb); + dev_kfree_skb_any(buf); + } + } +} + +static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool, + size_t n_urbs, size_t urb_size, + struct lan78xx_net *dev) +{ + struct skb_data *entry; + struct sk_buff *buf; + struct urb *urb; + int i; + + skb_queue_head_init(buf_pool); + + for (i = 0; i < n_urbs; i++) { + buf = alloc_skb(urb_size, GFP_ATOMIC); + if (!buf) + goto error; + + if (skb_linearize(buf) != 0) { + dev_kfree_skb_any(buf); + goto error; + } + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + dev_kfree_skb_any(buf); + goto error; + } + + entry = (struct skb_data *)buf->cb; + entry->urb = urb; + entry->dev = dev; + entry->length = 0; + entry->num_of_packet = 0; + + skb_queue_tail(buf_pool, buf); + } + + return 0; + +error: + lan78xx_free_buf_pool(buf_pool); + + return -ENOMEM; +} + +static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev) +{ + return lan78xx_get_buf(&dev->rxq_free); +} + +static void lan78xx_release_rx_buf(struct lan78xx_net *dev, + struct sk_buff *rx_buf) +{ + lan78xx_release_buf(&dev->rxq_free, rx_buf); +} + +static void lan78xx_free_rx_resources(struct lan78xx_net *dev) +{ + lan78xx_free_buf_pool(&dev->rxq_free); +} + +static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev) +{ + return lan78xx_alloc_buf_pool(&dev->rxq_free, + dev->n_rx_urbs, dev->rx_urb_size, dev); +} + +static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev) +{ + return lan78xx_get_buf(&dev->txq_free); +} + +static void lan78xx_release_tx_buf(struct lan78xx_net *dev, + struct sk_buff *tx_buf) +{ + lan78xx_release_buf(&dev->txq_free, tx_buf); +} + +static void lan78xx_free_tx_resources(struct lan78xx_net *dev) +{ + lan78xx_free_buf_pool(&dev->txq_free); +} + +static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev) +{ + return lan78xx_alloc_buf_pool(&dev->txq_free, + dev->n_tx_urbs, dev->tx_urb_size, dev); +} + static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) { u32 *buf; @@ -1200,6 +1368,8 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, return 0; } +static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev); + static int lan78xx_mac_reset(struct lan78xx_net *dev) { unsigned long start_time = jiffies; @@ -1331,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) jiffies + STAT_UPDATE_TIMER); } - tasklet_schedule(&dev->bh); + lan78xx_rx_urb_submit_all(dev); + + napi_schedule(&dev->napi); } return 0; @@ -2228,7 +2400,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (dev->domain_data.phyirq > 0) phydev->irq = dev->domain_data.phyirq; else - phydev->irq = 0; + phydev->irq = PHY_POLL; netdev_dbg(dev->net, 
"phydev->irq = %d\n", phydev->irq); /* set to AUTOMDIX */ @@ -2371,37 +2543,24 @@ found: static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) { struct lan78xx_net *dev = netdev_priv(netdev); - int ll_mtu = new_mtu + netdev->hard_header_len; - int old_hard_mtu = dev->hard_mtu; - int old_rx_urb_size = dev->rx_urb_size; + int max_frame_len = RX_MAX_FRAME_LEN(new_mtu); int ret; /* no second zero-length packet read wanted after mtu-sized packets */ - if ((ll_mtu % dev->maxpacket) == 0) + if ((max_frame_len % dev->maxpacket) == 0) return -EDOM; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; - lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); - - netdev->mtu = new_mtu; - - dev->hard_mtu = netdev->mtu + netdev->hard_header_len; - if (dev->rx_urb_size == old_hard_mtu) { - dev->rx_urb_size = dev->hard_mtu; - if (dev->rx_urb_size > old_rx_urb_size) { - if (netif_running(dev->net)) { - unlink_urbs(dev, &dev->rxq); - tasklet_schedule(&dev->bh); - } - } - } + ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len); + if (!ret) + netdev->mtu = new_mtu; usb_autopm_put_interface(dev->intf); - return 0; + return ret; } static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) @@ -2557,6 +2716,44 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev) lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); } +static int lan78xx_urb_config_init(struct lan78xx_net *dev) +{ + int result = 0; + + switch (dev->udev->speed) { + case USB_SPEED_SUPER: + dev->rx_urb_size = RX_SS_URB_SIZE; + dev->tx_urb_size = TX_SS_URB_SIZE; + dev->n_rx_urbs = RX_SS_URB_NUM; + dev->n_tx_urbs = TX_SS_URB_NUM; + dev->bulk_in_delay = SS_BULK_IN_DELAY; + dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE; + break; + case USB_SPEED_HIGH: + dev->rx_urb_size = RX_HS_URB_SIZE; + dev->tx_urb_size = TX_HS_URB_SIZE; + dev->n_rx_urbs = RX_HS_URB_NUM; + dev->n_tx_urbs = TX_HS_URB_NUM; + dev->bulk_in_delay = HS_BULK_IN_DELAY; + dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; + break; + case USB_SPEED_FULL: + dev->rx_urb_size = RX_FS_URB_SIZE; + dev->tx_urb_size = TX_FS_URB_SIZE; + dev->n_rx_urbs = RX_FS_URB_NUM; + dev->n_tx_urbs = TX_FS_URB_NUM; + dev->bulk_in_delay = FS_BULK_IN_DELAY; + dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; + break; + default: + netdev_warn(dev->net, "USB bus speed not supported\n"); + result = -EIO; + break; + } + + return result; +} + static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable) { return lan78xx_update_reg(dev, reg, hw_enable, hw_enable); @@ -2764,28 +2961,11 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* Init LTM */ lan78xx_init_ltm(dev); - if (dev->udev->speed == USB_SPEED_SUPER) { - buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; - dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; - dev->rx_qlen = 4; - dev->tx_qlen = 4; - } else if (dev->udev->speed == USB_SPEED_HIGH) { - buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE; - dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; - dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size; - dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu; - } else { - buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; - dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; - dev->rx_qlen = 4; - dev->tx_qlen = 4; - } - - ret = lan78xx_write_reg(dev, BURST_CAP, buf); + ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap); if (ret < 0) return ret; - ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); + ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay); if (ret < 0) return ret; @@ 
-2898,7 +3078,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) return ret; ret = lan78xx_set_rx_max_frame_length(dev, - dev->net->mtu + VLAN_ETH_HLEN); + RX_MAX_FRAME_LEN(dev->net->mtu)); return ret; } @@ -2978,6 +3158,8 @@ static int lan78xx_open(struct net_device *net) dev->link_on = false; + napi_enable(&dev->napi); + lan78xx_defer_kevent(dev, EVENT_LINK_RESET); done: mutex_unlock(&dev->dev_mutex); @@ -3011,15 +3193,16 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev) dev->wait = NULL; remove_wait_queue(&unlink_wakeup, &wait); - while (!skb_queue_empty(&dev->done)) { - struct skb_data *entry; - struct sk_buff *skb; + /* empty Rx done, Rx overflow and Tx pend queues + */ + while (!skb_queue_empty(&dev->rxq_done)) { + struct sk_buff *skb = skb_dequeue(&dev->rxq_done); - skb = skb_dequeue(&dev->done); - entry = (struct skb_data *)(skb->cb); - usb_free_urb(entry->urb); - dev_kfree_skb(skb); + lan78xx_release_rx_buf(dev, skb); } + + skb_queue_purge(&dev->rxq_overflow); + skb_queue_purge(&dev->txq_pend); } static int lan78xx_stop(struct net_device *net) @@ -3035,7 +3218,7 @@ static int lan78xx_stop(struct net_device *net) clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue(net); - tasklet_kill(&dev->bh); + napi_disable(&dev->napi); lan78xx_terminate_urbs(dev); @@ -3071,48 +3254,6 @@ static int lan78xx_stop(struct net_device *net) return 0; } -static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, - struct sk_buff *skb, gfp_t flags) -{ - u32 tx_cmd_a, tx_cmd_b; - void *ptr; - - if (skb_cow_head(skb, TX_OVERHEAD)) { - dev_kfree_skb_any(skb); - return NULL; - } - - if (skb_linearize(skb)) { - dev_kfree_skb_any(skb); - return NULL; - } - - tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_; - - tx_cmd_b = 0; - if (skb_is_gso(skb)) { - u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_); - - tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_; - - tx_cmd_a |= TX_CMD_A_LSO_; - } - - if (skb_vlan_tag_present(skb)) { - tx_cmd_a |= TX_CMD_A_IVTG_; - tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; - } - - ptr = skb_push(skb, 8); - put_unaligned_le32(tx_cmd_a, ptr); - put_unaligned_le32(tx_cmd_b, ptr + 4); - - return skb; -} - static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb, struct sk_buff_head *list, enum skb_state state) { @@ -3126,12 +3267,13 @@ static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb, __skb_unlink(skb, list); spin_unlock(&list->lock); - spin_lock(&dev->done.lock); + spin_lock(&dev->rxq_done.lock); + + __skb_queue_tail(&dev->rxq_done, skb); + if (skb_queue_len(&dev->rxq_done) == 1) + napi_schedule(&dev->napi); - __skb_queue_tail(&dev->done, skb); - if (skb_queue_len(&dev->done) == 1) - tasklet_schedule(&dev->bh); - spin_unlock_irqrestore(&dev->done.lock, flags); + spin_unlock_irqrestore(&dev->rxq_done.lock, flags); return old_state; } @@ -3146,7 +3288,7 @@ static void tx_complete(struct urb *urb) dev->net->stats.tx_packets += entry->num_of_packet; dev->net->stats.tx_bytes += entry->length; } else { - dev->net->stats.tx_errors++; + dev->net->stats.tx_errors += entry->num_of_packet; switch (urb->status) { case -EPIPE: @@ -3179,7 +3321,15 @@ static void tx_complete(struct urb *urb) usb_autopm_put_interface_async(dev->intf); - defer_bh(dev, skb, &dev->txq, tx_done); + skb_unlink(skb, &dev->txq); + + lan78xx_release_tx_buf(dev, skb); + + /* Re-schedule NAPI if Tx data pending but no 
URBs in progress. + */ + if (skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->txq_pend)) + napi_schedule(&dev->napi); } static void lan78xx_queue_skb(struct sk_buff_head *list, @@ -3191,35 +3341,96 @@ static void lan78xx_queue_skb(struct sk_buff_head *list, entry->state = state; } +static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev) +{ + return skb_queue_len(&dev->txq_free) * dev->tx_urb_size; +} + +static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev) +{ + return dev->tx_pend_data_len; +} + +static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev, + struct sk_buff *skb, + unsigned int *tx_pend_data_len) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->txq_pend.lock, flags); + + __skb_queue_tail(&dev->txq_pend, skb); + + dev->tx_pend_data_len += skb->len; + *tx_pend_data_len = dev->tx_pend_data_len; + + spin_unlock_irqrestore(&dev->txq_pend.lock, flags); +} + +static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev, + struct sk_buff *skb, + unsigned int *tx_pend_data_len) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->txq_pend.lock, flags); + + __skb_queue_head(&dev->txq_pend, skb); + + dev->tx_pend_data_len += skb->len; + *tx_pend_data_len = dev->tx_pend_data_len; + + spin_unlock_irqrestore(&dev->txq_pend.lock, flags); +} + +static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev, + struct sk_buff **skb, + unsigned int *tx_pend_data_len) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->txq_pend.lock, flags); + + *skb = __skb_dequeue(&dev->txq_pend); + if (*skb) + dev->tx_pend_data_len -= (*skb)->len; + *tx_pend_data_len = dev->tx_pend_data_len; + + spin_unlock_irqrestore(&dev->txq_pend.lock, flags); +} + static netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); - struct sk_buff *skb2 = NULL; + unsigned int tx_pend_data_len; if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) schedule_delayed_work(&dev->wq, 0); - if (skb) { - skb_tx_timestamp(skb); - skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC); - } + skb_tx_timestamp(skb); - if (skb2) { - skb_queue_tail(&dev->txq_pend, skb2); + lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len); - /* throttle TX patch at slower than SUPER SPEED USB */ - if ((dev->udev->speed < USB_SPEED_SUPER) && - (skb_queue_len(&dev->txq_pend) > 10)) - netif_stop_queue(net); - } else { - netif_dbg(dev, tx_err, dev->net, - "lan78xx_tx_prep return NULL\n"); - dev->net->stats.tx_errors++; - dev->net->stats.tx_dropped++; - } + /* Set up a Tx URB if none is in progress */ + + if (skb_queue_empty(&dev->txq)) + napi_schedule(&dev->napi); + + /* Stop stack Tx queue if we have enough data to fill + * all the free Tx URBs. 
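To put numbers on the watermark below, using the SuperSpeed sizing defined earlier (TX_URB_NUM = 10, TX_SS_URB_SIZE = 32 KiB): lan78xx_tx_urb_space() reports free URBs times 32768 bytes, so with every URB idle the stack queue is stopped only once more than 320 KiB of data is pending, and completions wake it again from the NAPI Tx handler. The two halves of the heuristic, with the patch's own names:

    /* producer (ndo_start_xmit): throttle when pending data would
     * already fill every free URB */
    if (tx_pend_data_len > lan78xx_tx_urb_space(dev))
            netif_stop_queue(net);

    /* consumer (lan78xx_tx_bh): wake once there is slack again */
    if (netif_queue_stopped(dev->net) &&
        lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
            netif_wake_queue(dev->net);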
+ */ + if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) { + netif_stop_queue(net); + + netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u", + tx_pend_data_len, lan78xx_tx_urb_space(dev)); - tasklet_schedule(&dev->bh); + /* Kick off transmission of pending data */ + + if (!skb_queue_empty(&dev->txq_free)) + napi_schedule(&dev->napi); + } return NETDEV_TX_OK; } @@ -3276,9 +3487,6 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) goto out1; } - dev->net->hard_header_len += TX_OVERHEAD; - dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; - /* Init all registers */ ret = lan78xx_reset(dev); if (ret) { @@ -3357,8 +3565,6 @@ static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { - int status; - dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb->len; @@ -3371,21 +3577,21 @@ static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) if (skb_defer_rx_timestamp(skb)) return; - status = netif_rx(skb); - if (status != NET_RX_SUCCESS) - netif_dbg(dev, rx_err, dev->net, - "netif_rx status %d\n", status); + napi_gro_receive(&dev->napi, skb); } -static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) +static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb, + int budget, int *work_done) { - if (skb->len < dev->net->hard_header_len) + if (skb->len < RX_SKB_MIN_LEN) return 0; + /* Extract frames from the URB buffer and pass each one to + * the stack in a new NAPI SKB. + */ while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, align_count, size; u16 rx_cmd_c; - struct sk_buff *skb2; unsigned char *packet; rx_cmd_a = get_unaligned_le32(skb->data); @@ -3407,41 +3613,36 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x", rx_cmd_a); } else { - /* last frame in this batch */ - if (skb->len == size) { - lan78xx_rx_csum_offload(dev, skb, - rx_cmd_a, rx_cmd_b); - lan78xx_rx_vlan_offload(dev, skb, - rx_cmd_a, rx_cmd_b); - - skb_trim(skb, skb->len - 4); /* remove fcs */ - skb->truesize = size + sizeof(struct sk_buff); - - return 1; - } + u32 frame_len = size - ETH_FCS_LEN; + struct sk_buff *skb2; - skb2 = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!skb2)) { - netdev_warn(dev->net, "Error allocating skb"); + skb2 = napi_alloc_skb(&dev->napi, frame_len); + if (!skb2) return 0; - } - skb2->len = size; - skb2->data = packet; - skb_set_tail_pointer(skb2, size); + memcpy(skb2->data, packet, frame_len); + + skb_put(skb2, frame_len); lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); - skb_trim(skb2, skb2->len - 4); /* remove fcs */ - skb2->truesize = size + sizeof(struct sk_buff); - - lan78xx_skb_return(dev, skb2); + /* Processing of the URB buffer must complete once + * it has started. If the NAPI work budget is exhausted + * while frames remain they are added to the overflow + * queue for delivery in the next NAPI polling cycle. 
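The budget rule described above can be read as one small helper; this hypothetical lan78xx_deliver_or_park() just gathers the two branches the patch open-codes inline:

    /* Deliver within this NAPI cycle's budget; park anything beyond it
     * on rxq_overflow, which is drained first on the next poll. */
    static void lan78xx_deliver_or_park(struct lan78xx_net *dev,
                                        struct sk_buff *skb,
                                        int budget, int *work_done)
    {
            if (*work_done < budget) {
                    lan78xx_skb_return(dev, skb);
                    ++(*work_done);
            } else {
                    skb_queue_tail(&dev->rxq_overflow, skb);
            }
    }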
+ */ + if (*work_done < budget) { + lan78xx_skb_return(dev, skb2); + ++(*work_done); + } else { + skb_queue_tail(&dev->rxq_overflow, skb2); + } } skb_pull(skb, size); - /* padding bytes before the next frame starts */ + /* skip padding bytes before the next frame starts */ if (skb->len) skb_pull(skb, align_count); } @@ -3449,85 +3650,13 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) return 1; } -static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb) +static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb, + int budget, int *work_done) { - if (!lan78xx_rx(dev, skb)) { + if (!lan78xx_rx(dev, skb, budget, work_done)) { + netif_dbg(dev, rx_err, dev->net, "drop\n"); dev->net->stats.rx_errors++; - goto done; - } - - if (skb->len) { - lan78xx_skb_return(dev, skb); - return; - } - - netif_dbg(dev, rx_err, dev->net, "drop\n"); - dev->net->stats.rx_errors++; -done: - skb_queue_tail(&dev->done, skb); -} - -static void rx_complete(struct urb *urb); - -static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags) -{ - struct sk_buff *skb; - struct skb_data *entry; - unsigned long lockflags; - size_t size = dev->rx_urb_size; - int ret = 0; - - skb = netdev_alloc_skb_ip_align(dev->net, size); - if (!skb) { - usb_free_urb(urb); - return -ENOMEM; - } - - entry = (struct skb_data *)skb->cb; - entry->urb = urb; - entry->dev = dev; - entry->length = 0; - - usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in, - skb->data, size, rx_complete, skb); - - spin_lock_irqsave(&dev->rxq.lock, lockflags); - - if (netif_device_present(dev->net) && - netif_running(dev->net) && - !test_bit(EVENT_RX_HALT, &dev->flags) && - !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { - ret = usb_submit_urb(urb, GFP_ATOMIC); - switch (ret) { - case 0: - lan78xx_queue_skb(&dev->rxq, skb, rx_start); - break; - case -EPIPE: - lan78xx_defer_kevent(dev, EVENT_RX_HALT); - break; - case -ENODEV: - case -ENOENT: - netif_dbg(dev, ifdown, dev->net, "device gone\n"); - netif_device_detach(dev->net); - break; - case -EHOSTUNREACH: - ret = -ENOLINK; - break; - default: - netif_dbg(dev, rx_err, dev->net, - "rx submit, %d\n", ret); - tasklet_schedule(&dev->bh); - } - } else { - netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); - ret = -ENOLINK; - } - spin_unlock_irqrestore(&dev->rxq.lock, lockflags); - if (ret) { - dev_kfree_skb_any(skb); - usb_free_urb(urb); } - return ret; } static void rx_complete(struct urb *urb) @@ -3538,13 +3667,18 @@ static void rx_complete(struct urb *urb) int urb_status = urb->status; enum skb_state state; + netif_dbg(dev, rx_status, dev->net, + "rx done: status %d", urb->status); + skb_put(skb, urb->actual_length); state = rx_done; - entry->urb = NULL; + + if (urb != entry->urb) + netif_warn(dev, rx_err, dev->net, "URB pointer mismatch"); switch (urb_status) { case 0: - if (skb->len < dev->net->hard_header_len) { + if (skb->len < RX_SKB_MIN_LEN) { state = rx_cleanup; dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; @@ -3562,16 +3696,12 @@ static void rx_complete(struct urb *urb) netif_dbg(dev, ifdown, dev->net, "rx shutdown, code %d\n", urb_status); state = rx_cleanup; - entry->urb = urb; - urb = NULL; break; case -EPROTO: case -ETIME: case -EILSEQ: dev->net->stats.rx_errors++; state = rx_cleanup; - entry->urb = urb; - urb = NULL; break; /* data overrun ... flush fifo? 
*/ @@ -3587,203 +3717,327 @@ static void rx_complete(struct urb *urb) } state = defer_bh(dev, skb, &dev->rxq, state); +} - if (urb) { - if (netif_running(dev->net) && - !test_bit(EVENT_RX_HALT, &dev->flags) && - state != unlink_start) { - rx_submit(dev, urb, GFP_ATOMIC); - return; +static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) +{ + struct skb_data *entry = (struct skb_data *)skb->cb; + size_t size = dev->rx_urb_size; + struct urb *urb = entry->urb; + unsigned long lockflags; + int ret = 0; + + usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in, + skb->data, size, rx_complete, skb); + + spin_lock_irqsave(&dev->rxq.lock, lockflags); + + if (netif_device_present(dev->net) && + netif_running(dev->net) && + !test_bit(EVENT_RX_HALT, &dev->flags) && + !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + ret = usb_submit_urb(urb, flags); + switch (ret) { + case 0: + lan78xx_queue_skb(&dev->rxq, skb, rx_start); + break; + case -EPIPE: + lan78xx_defer_kevent(dev, EVENT_RX_HALT); + break; + case -ENODEV: + case -ENOENT: + netif_dbg(dev, ifdown, dev->net, "device gone\n"); + netif_device_detach(dev->net); + break; + case -EHOSTUNREACH: + ret = -ENOLINK; + napi_schedule(&dev->napi); + break; + default: + netif_dbg(dev, rx_err, dev->net, + "rx submit, %d\n", ret); + napi_schedule(&dev->napi); + break; } - usb_free_urb(urb); + } else { + netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); + ret = -ENOLINK; } - netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); + spin_unlock_irqrestore(&dev->rxq.lock, lockflags); + + if (ret) + lan78xx_release_rx_buf(dev, skb); + + return ret; } -static void lan78xx_tx_bh(struct lan78xx_net *dev) +static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev) { - int length; - struct urb *urb = NULL; - struct skb_data *entry; - unsigned long flags; - struct sk_buff_head *tqp = &dev->txq_pend; - struct sk_buff *skb, *skb2; - int ret; - int count, pos; - int skb_totallen, pkt_cnt; - - skb_totallen = 0; - pkt_cnt = 0; - count = 0; - length = 0; - spin_lock_irqsave(&tqp->lock, flags); - skb_queue_walk(tqp, skb) { - if (skb_is_gso(skb)) { - if (!skb_queue_is_first(tqp, skb)) { - /* handle previous packets first */ - break; - } - count = 1; - length = skb->len - TX_OVERHEAD; - __skb_unlink(skb, tqp); - spin_unlock_irqrestore(&tqp->lock, flags); - goto gso_skb; - } + struct sk_buff *rx_buf; - if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE) + /* Ensure the maximum number of Rx URBs is submitted + */ + while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) { + if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0) break; - skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); - pkt_cnt++; - } - spin_unlock_irqrestore(&tqp->lock, flags); - - /* copy to a single skb */ - skb = alloc_skb(skb_totallen, GFP_ATOMIC); - if (!skb) - goto drop; - - skb_put(skb, skb_totallen); - - for (count = pos = 0; count < pkt_cnt; count++) { - skb2 = skb_dequeue(tqp); - if (skb2) { - length += (skb2->len - TX_OVERHEAD); - memcpy(skb->data + pos, skb2->data, skb2->len); - pos += roundup(skb2->len, sizeof(u32)); - dev_kfree_skb(skb2); - } } +} -gso_skb: - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) - goto drop; +static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev, + struct sk_buff *rx_buf) +{ + /* reset SKB data pointers */ - entry = (struct skb_data *)skb->cb; - entry->urb = urb; - entry->dev = dev; - entry->length = length; - entry->num_of_packet = count; + rx_buf->data = rx_buf->head; + skb_reset_tail_pointer(rx_buf); + rx_buf->len = 0; + rx_buf->data_len = 
0; - spin_lock_irqsave(&dev->txq.lock, flags); - ret = usb_autopm_get_interface_async(dev->intf); - if (ret < 0) { - spin_unlock_irqrestore(&dev->txq.lock, flags); - goto drop; - } + rx_submit(dev, rx_buf, GFP_ATOMIC); +} - usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out, - skb->data, skb->len, tx_complete, skb); +static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer) +{ + u32 tx_cmd_a; + u32 tx_cmd_b; - if (length % dev->maxpacket == 0) { - /* send USB_ZERO_PACKET */ - urb->transfer_flags |= URB_ZERO_PACKET; - } + tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; -#ifdef CONFIG_PM - /* if this triggers the device is still a sleep */ - if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { - /* transmission will be done in resume */ - usb_anchor_urb(urb, &dev->deferred); - /* no use to process more packets */ - netif_stop_queue(dev->net); - usb_put_urb(urb); - spin_unlock_irqrestore(&dev->txq.lock, flags); - netdev_dbg(dev->net, "Delaying transmission for resumption\n"); - return; + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_; + + tx_cmd_b = 0; + if (skb_is_gso(skb)) { + u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_); + + tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_; + + tx_cmd_a |= TX_CMD_A_LSO_; } -#endif - ret = usb_submit_urb(urb, GFP_ATOMIC); - switch (ret) { - case 0: - netif_trans_update(dev->net); - lan78xx_queue_skb(&dev->txq, skb, tx_start); - if (skb_queue_len(&dev->txq) >= dev->tx_qlen) - netif_stop_queue(dev->net); - break; - case -EPIPE: - netif_stop_queue(dev->net); - lan78xx_defer_kevent(dev, EVENT_TX_HALT); - usb_autopm_put_interface_async(dev->intf); - break; - case -ENODEV: - case -ENOENT: - netif_dbg(dev, tx_err, dev->net, - "tx: submit urb err %d (disconnected?)", ret); - netif_device_detach(dev->net); - break; - default: - usb_autopm_put_interface_async(dev->intf); - netif_dbg(dev, tx_err, dev->net, - "tx: submit urb err %d\n", ret); - break; + if (skb_vlan_tag_present(skb)) { + tx_cmd_a |= TX_CMD_A_IVTG_; + tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; } - spin_unlock_irqrestore(&dev->txq.lock, flags); + put_unaligned_le32(tx_cmd_a, buffer); + put_unaligned_le32(tx_cmd_b, buffer + 4); +} - if (ret) { - netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret); -drop: - dev->net->stats.tx_dropped++; - if (skb) +static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev, + struct sk_buff *tx_buf) +{ + struct skb_data *entry = (struct skb_data *)tx_buf->cb; + int remain = dev->tx_urb_size; + u8 *tx_data = tx_buf->data; + u32 urb_len = 0; + + entry->num_of_packet = 0; + entry->length = 0; + + /* Work through the pending SKBs and copy the data of each SKB into + * the URB buffer if there room for all the SKB data. 
+ * + * There must be at least DST+SRC+TYPE in the SKB (with padding enabled) + */ + while (remain >= TX_SKB_MIN_LEN) { + unsigned int pending_bytes; + unsigned int align_bytes; + struct sk_buff *skb; + unsigned int len; + + lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes); + + if (!skb) + break; + + align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) % + TX_ALIGNMENT; + len = align_bytes + TX_CMD_LEN + skb->len; + if (len > remain) { + lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes); + break; + } + + tx_data += align_bytes; + + lan78xx_fill_tx_cmd_words(skb, tx_data); + tx_data += TX_CMD_LEN; + + len = skb->len; + if (skb_copy_bits(skb, 0, tx_data, len) < 0) { + struct net_device_stats *stats = &dev->net->stats; + + stats->tx_dropped++; dev_kfree_skb_any(skb); - usb_free_urb(urb); - } else { - netif_dbg(dev, tx_queued, dev->net, - "> tx, len %d, type 0x%x\n", length, skb->protocol); + tx_data -= TX_CMD_LEN; + continue; + } + + tx_data += len; + entry->length += len; + entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1; + + dev_kfree_skb_any(skb); + + urb_len = (u32)(tx_data - (u8 *)tx_buf->data); + + remain = dev->tx_urb_size - urb_len; } + + skb_put(tx_buf, urb_len); + + return entry; } -static void lan78xx_rx_bh(struct lan78xx_net *dev) +static void lan78xx_tx_bh(struct lan78xx_net *dev) { - struct urb *urb; - int i; + int ret; - if (skb_queue_len(&dev->rxq) < dev->rx_qlen) { - for (i = 0; i < 10; i++) { - if (skb_queue_len(&dev->rxq) >= dev->rx_qlen) - break; - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (urb) - if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK) - return; + /* Start the stack Tx queue if it was stopped + */ + netif_tx_lock(dev->net); + if (netif_queue_stopped(dev->net)) { + if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)) + netif_wake_queue(dev->net); + } + netif_tx_unlock(dev->net); + + /* Go through the Tx pending queue and set up URBs to transfer + * the data to the device. Stop if no more pending data or URBs, + * or if an error occurs when a URB is submitted. 
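A worked example of the TX_ALIGNMENT arithmetic in lan78xx_tx_buf_fill() above: with urb_len = 10 and TX_ALIGNMENT = 4, align_bytes = (4 - (10 % 4)) % 4 = 2, so the next 8-byte command header lands at offset 12; the outer % 4 keeps align_bytes at 0 (not 4) when urb_len is already aligned. Each packet in the URB is therefore laid out as pad-to-4B, then tx_cmd_a and tx_cmd_b, then the frame data.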
+ */ + do { + struct skb_data *entry; + struct sk_buff *tx_buf; + unsigned long flags; + + if (skb_queue_empty(&dev->txq_pend)) + break; + + tx_buf = lan78xx_get_tx_buf(dev); + if (!tx_buf) + break; + + entry = lan78xx_tx_buf_fill(dev, tx_buf); + + spin_lock_irqsave(&dev->txq.lock, flags); + ret = usb_autopm_get_interface_async(dev->intf); + if (ret < 0) { + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto out; } - if (skb_queue_len(&dev->rxq) < dev->rx_qlen) - tasklet_schedule(&dev->bh); - } - if (skb_queue_len(&dev->txq) < dev->tx_qlen) - netif_wake_queue(dev->net); + usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out, + tx_buf->data, tx_buf->len, tx_complete, + tx_buf); + + if (tx_buf->len % dev->maxpacket == 0) { + /* send USB_ZERO_PACKET */ + entry->urb->transfer_flags |= URB_ZERO_PACKET; + } + +#ifdef CONFIG_PM + /* if device is asleep stop outgoing packet processing */ + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + usb_anchor_urb(entry->urb, &dev->deferred); + netif_stop_queue(dev->net); + spin_unlock_irqrestore(&dev->txq.lock, flags); + netdev_dbg(dev->net, + "Delaying transmission for resumption\n"); + return; + } +#endif + ret = usb_submit_urb(entry->urb, GFP_ATOMIC); + switch (ret) { + case 0: + netif_trans_update(dev->net); + lan78xx_queue_skb(&dev->txq, tx_buf, tx_start); + break; + case -EPIPE: + netif_stop_queue(dev->net); + lan78xx_defer_kevent(dev, EVENT_TX_HALT); + usb_autopm_put_interface_async(dev->intf); + break; + case -ENODEV: + case -ENOENT: + netif_dbg(dev, tx_err, dev->net, + "tx submit urb err %d (disconnected?)", ret); + netif_device_detach(dev->net); + break; + default: + usb_autopm_put_interface_async(dev->intf); + netif_dbg(dev, tx_err, dev->net, + "tx submit urb err %d\n", ret); + break; + } + + spin_unlock_irqrestore(&dev->txq.lock, flags); + + if (ret) { + netdev_warn(dev->net, "failed to tx urb %d\n", ret); +out: + dev->net->stats.tx_dropped += entry->num_of_packet; + lan78xx_release_tx_buf(dev, tx_buf); + } + } while (ret == 0); } -static void lan78xx_bh(struct tasklet_struct *t) +static int lan78xx_bh(struct lan78xx_net *dev, int budget) { - struct lan78xx_net *dev = from_tasklet(dev, t, bh); - struct sk_buff *skb; + struct sk_buff_head done; + struct sk_buff *rx_buf; struct skb_data *entry; + unsigned long flags; + int work_done = 0; + + /* Pass frames received in the last NAPI cycle before + * working on newly completed URBs. + */ + while (!skb_queue_empty(&dev->rxq_overflow)) { + lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow)); + ++work_done; + } - while ((skb = skb_dequeue(&dev->done))) { - entry = (struct skb_data *)(skb->cb); + /* Take a snapshot of the done queue and move items to a + * temporary queue. Rx URB completions will continue to add + * to the done queue. + */ + __skb_queue_head_init(&done); + + spin_lock_irqsave(&dev->rxq_done.lock, flags); + skb_queue_splice_init(&dev->rxq_done, &done); + spin_unlock_irqrestore(&dev->rxq_done.lock, flags); + + /* Extract receive frames from completed URBs and + * pass them to the stack. Re-submit each completed URB. 
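The snapshot step above is the standard splice pattern: take the shared queue's lock once, move the whole list aside, and process it lock-free while rx_complete() keeps appending to the (now empty) original. In isolation, with the same calls the patch uses:

    struct sk_buff_head done;
    unsigned long flags;

    __skb_queue_head_init(&done);

    spin_lock_irqsave(&dev->rxq_done.lock, flags);
    skb_queue_splice_init(&dev->rxq_done, &done);  /* snapshot and empty */
    spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

    /* ... walk 'done' without the lock; any leftovers are spliced back
     * to the head of rxq_done when the budget runs out ... */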
+ */ + while ((work_done < budget) && + (rx_buf = __skb_dequeue(&done))) { + entry = (struct skb_data *)(rx_buf->cb); switch (entry->state) { case rx_done: - entry->state = rx_cleanup; - rx_process(dev, skb); - continue; - case tx_done: - usb_free_urb(entry->urb); - dev_kfree_skb(skb); - continue; + rx_process(dev, rx_buf, budget, &work_done); + break; case rx_cleanup: - usb_free_urb(entry->urb); - dev_kfree_skb(skb); - continue; + break; default: - netdev_dbg(dev->net, "skb state %d\n", entry->state); - return; + netdev_dbg(dev->net, "rx buf state %d\n", + entry->state); + break; } + + lan78xx_rx_urb_resubmit(dev, rx_buf); } + /* If budget was consumed before processing all the URBs put them + * back on the front of the done queue. They will be first to be + * processed in the next NAPI cycle. + */ + spin_lock_irqsave(&dev->rxq_done.lock, flags); + skb_queue_splice(&done, &dev->rxq_done); + spin_unlock_irqrestore(&dev->rxq_done.lock, flags); + if (netif_device_present(dev->net) && netif_running(dev->net)) { /* reset update timer delta */ if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) { @@ -3792,12 +4046,61 @@ static void lan78xx_bh(struct tasklet_struct *t) jiffies + STAT_UPDATE_TIMER); } - if (!skb_queue_empty(&dev->txq_pend)) - lan78xx_tx_bh(dev); + /* Submit all free Rx URBs */ if (!test_bit(EVENT_RX_HALT, &dev->flags)) - lan78xx_rx_bh(dev); + lan78xx_rx_urb_submit_all(dev); + + /* Submit new Tx URBs */ + + lan78xx_tx_bh(dev); } + + return work_done; +} + +static int lan78xx_poll(struct napi_struct *napi, int budget) +{ + struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi); + int result = budget; + int work_done; + + /* Don't do any work if the device is suspended */ + + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + napi_complete_done(napi, 0); + return 0; + } + + /* Process completed URBs and submit new URBs */ + + work_done = lan78xx_bh(dev, budget); + + if (work_done < budget) { + napi_complete_done(napi, work_done); + + /* Start a new polling cycle if data was received or + * data is waiting to be transmitted. 
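For comparison, the canonical shape of a poll routine like this one; checking napi_complete_done()'s return value before rescheduling, as sketched here, is the usual race-free idiom, whereas the patch completes first and then decides. foo_process() and more_work_pending() are placeholders, not driver API:

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            int work_done = foo_process(napi, budget);  /* rx + tx work */

            if (work_done < budget &&
                napi_complete_done(napi, work_done) &&
                more_work_pending())
                    napi_schedule(napi);    /* re-arm for the leftovers */

            return work_done;
    }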
+ */ + if (!skb_queue_empty(&dev->rxq_done)) { + napi_schedule(napi); + } else if (netif_carrier_ok(dev->net)) { + if (skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->txq_pend)) { + napi_schedule(napi); + } else { + netif_tx_lock(dev->net); + if (netif_queue_stopped(dev->net)) { + netif_wake_queue(dev->net); + napi_schedule(napi); + } + netif_tx_unlock(dev->net); + } + } + result = work_done; + } + + return result; } static void lan78xx_delayedwork(struct work_struct *work) @@ -3843,7 +4146,7 @@ static void lan78xx_delayedwork(struct work_struct *work) status); } else { clear_bit(EVENT_RX_HALT, &dev->flags); - tasklet_schedule(&dev->bh); + napi_schedule(&dev->napi); } } @@ -3937,6 +4240,8 @@ static void lan78xx_disconnect(struct usb_interface *intf) set_bit(EVENT_DEV_DISCONNECT, &dev->flags); + netif_napi_del(&dev->napi); + udev = interface_to_usbdev(intf); net = dev->net; @@ -3961,6 +4266,9 @@ static void lan78xx_disconnect(struct usb_interface *intf) lan78xx_unbind(dev, intf); + lan78xx_free_tx_resources(dev); + lan78xx_free_rx_resources(dev); + usb_kill_urb(dev->urb_intr); usb_free_urb(dev->urb_intr); @@ -3973,14 +4281,16 @@ static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue) struct lan78xx_net *dev = netdev_priv(net); unlink_urbs(dev, &dev->txq); - tasklet_schedule(&dev->bh); + napi_schedule(&dev->napi); } static netdev_features_t lan78xx_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { - if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE) + struct lan78xx_net *dev = netdev_priv(netdev); + + if (skb->len > LAN78XX_TSO_SIZE(dev)) features &= ~NETIF_F_GSO_MASK; features = vlan_features_check(skb, features); @@ -4046,12 +4356,31 @@ static int lan78xx_probe(struct usb_interface *intf, skb_queue_head_init(&dev->rxq); skb_queue_head_init(&dev->txq); - skb_queue_head_init(&dev->done); + skb_queue_head_init(&dev->rxq_done); skb_queue_head_init(&dev->txq_pend); + skb_queue_head_init(&dev->rxq_overflow); mutex_init(&dev->phy_mutex); mutex_init(&dev->dev_mutex); - tasklet_setup(&dev->bh, lan78xx_bh); + ret = lan78xx_urb_config_init(dev); + if (ret < 0) + goto out2; + + ret = lan78xx_alloc_tx_resources(dev); + if (ret < 0) + goto out2; + + ret = lan78xx_alloc_rx_resources(dev); + if (ret < 0) + goto out3; + + /* MTU range: 68 - 9000 */ + netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; + + netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev)); + + netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT); + INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); init_usb_anchor(&dev->deferred); @@ -4066,27 +4395,27 @@ static int lan78xx_probe(struct usb_interface *intf, if (intf->cur_altsetting->desc.bNumEndpoints < 3) { ret = -ENODEV; - goto out2; + goto out4; } dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { ret = -ENODEV; - goto out2; + goto out4; } dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { ret = -ENODEV; - goto out2; + goto out4; } ep_intr = &intf->cur_altsetting->endpoint[2]; if (!usb_endpoint_is_int_in(&ep_intr->desc)) { ret = -ENODEV; - goto out2; + goto out4; } dev->pipe_intr = usb_rcvintpipe(dev->udev, @@ -4094,30 +4423,25 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_bind(dev, intf); if (ret < 0) - goto out2; - - if (netdev->mtu > (dev->hard_mtu - 
netdev->hard_header_len)) - netdev->mtu = dev->hard_mtu - netdev->hard_header_len; - - /* MTU range: 68 - 9000 */ - netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; - netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); + goto out4; period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); - if (buf) { - dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); - if (!dev->urb_intr) { - ret = -ENOMEM; - kfree(buf); - goto out3; - } else { - usb_fill_int_urb(dev->urb_intr, dev->udev, - dev->pipe_intr, buf, maxp, - intr_complete, dev, period); - dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; - } + if (!buf) { + ret = -ENOMEM; + goto out5; + } + + dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->urb_intr) { + ret = -ENOMEM; + goto out6; + } else { + usb_fill_int_urb(dev->urb_intr, dev->udev, + dev->pipe_intr, buf, maxp, + intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1); @@ -4125,7 +4449,7 @@ static int lan78xx_probe(struct usb_interface *intf, /* Reject broken descriptors. */ if (dev->maxpacket == 0) { ret = -ENODEV; - goto out4; + goto out6; } /* driver requires remote-wakeup capability during autosuspend. */ @@ -4133,12 +4457,12 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_phy_init(dev); if (ret < 0) - goto out4; + goto out7; ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out5; + goto out8; } usb_set_intfdata(intf, dev); @@ -4153,12 +4477,19 @@ static int lan78xx_probe(struct usb_interface *intf, return 0; -out5: +out8: phy_disconnect(netdev->phydev); -out4: +out7: usb_free_urb(dev->urb_intr); -out3: +out6: + kfree(buf); +out5: lan78xx_unbind(dev, intf); +out4: + netif_napi_del(&dev->napi); + lan78xx_free_rx_resources(dev); +out3: + lan78xx_free_tx_resources(dev); out2: free_netdev(netdev); out1: @@ -4579,8 +4910,7 @@ static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev) if (!netif_device_present(dev->net) || !netif_carrier_ok(dev->net) || pipe_halted) { - usb_free_urb(urb); - dev_kfree_skb(skb); + lan78xx_release_tx_buf(dev, skb); continue; } @@ -4590,15 +4920,14 @@ static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev) netif_trans_update(dev->net); lan78xx_queue_skb(&dev->txq, skb, tx_start); } else { - usb_free_urb(urb); - dev_kfree_skb(skb); - if (ret == -EPIPE) { netif_stop_queue(dev->net); pipe_halted = true; } else if (ret == -ENODEV) { netif_device_detach(dev->net); } + + lan78xx_release_tx_buf(dev, skb); } } @@ -4630,8 +4959,7 @@ static int lan78xx_resume(struct usb_interface *intf) if (ret < 0) { if (ret == -ENODEV) netif_device_detach(dev->net); - - netdev_warn(dev->net, "Failed to submit intr URB"); + netdev_warn(dev->net, "Failed to submit intr URB"); } } @@ -4650,14 +4978,14 @@ static int lan78xx_resume(struct usb_interface *intf) if (!pipe_halted && netif_device_present(dev->net) && - (skb_queue_len(&dev->txq) < dev->tx_qlen)) + (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))) netif_start_queue(dev->net); ret = lan78xx_start_tx_path(dev); if (ret < 0) goto out; - tasklet_schedule(&dev->bh); + napi_schedule(&dev->napi); if (!timer_pending(&dev->stat_monitor)) { dev->delta = 1; @@ -4734,6 +5062,10 @@ static const struct usb_device_id products[] = { /* LAN7801 USB Gigabit Ethernet Device */ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID), }, + { + /* ATM2-AF USB Gigabit 
Ethernet Device */ + USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID), + }, {}, }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 326cc4e749d8..fdda0616704e 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -108,8 +108,16 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver"; static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) { - return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, - 0x0000, index, data, size); + int ret; + + ret = usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, + 0x0000, index, data, size); + if (ret < 0) + return ret; + else if (ret < size) + return -ENODATA; + + return ret; } static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index c4cd40b090fd..feb247e355f7 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -493,11 +493,11 @@ static void read_bulk_callback(struct urb *urb) goto goon; rx_status = buf[count - 2]; - if (rx_status & 0x1e) { + if (rx_status & 0x1c) { netif_dbg(pegasus, rx_err, net, "RX packet error %x\n", rx_status); net->stats.rx_errors++; - if (rx_status & 0x06) /* long or runt */ + if (rx_status & 0x04) /* runt */ net->stats.rx_length_errors++; if (rx_status & 0x08) net->stats.rx_crc_errors++; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 86b814e99224..f510e8219470 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 4a02f33f0643..ee41088c5251 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "12" /* Information for net */ -#define NET_VERSION "11" +#define NET_VERSION "12" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -4016,6 +4016,11 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type) ocp_write_word(tp, type, PLA_BP_BA, 0); } +static inline void rtl_reset_ocp_base(struct r8152 *tp) +{ + tp->ocp_base = -1; +} + static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait) { u16 data, check; @@ -4087,8 +4092,6 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait) rtl_phy_patch_request(tp, false, wait); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); - return 0; } @@ -4800,6 +4803,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, u32 len; u8 *data; + rtl_reset_ocp_base(tp); + if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return; @@ -4845,7 +4850,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, } } - ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base); + rtl_reset_ocp_base(tp); + rtl_phy_patch_request(tp, false, wait); if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version)) @@ -4861,6 +4867,8 @@ static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver) ver_addr = __le16_to_cpu(phy_ver->ver.addr); ver = __le16_to_cpu(phy_ver->ver.data); + rtl_reset_ocp_base(tp); + if (sram_read(tp, ver_addr) >= ver) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return 0; @@ -4877,6 +4885,8 @@ static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix) { u16 addr, data; + rtl_reset_ocp_base(tp); + addr = __le16_to_cpu(fix->setting.addr); data = ocp_reg_read(tp, addr); @@ -4908,6 +4918,8 @@ static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *ph u32 length; int i, num; + rtl_reset_ocp_base(tp); + num = phy->pre_num; for (i = 0; i < num; i++) sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr), @@ -4938,6 +4950,8 @@ static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) u32 length, i, num; __le16 *data; + rtl_reset_ocp_base(tp); + mode_reg = __le16_to_cpu(phy->mode_reg); sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre)); sram_write(tp, __le16_to_cpu(phy->ba_reg), @@ -5107,6 +5121,7 @@ post_fw: if (rtl_fw->post_fw) rtl_fw->post_fw(tp); + rtl_reset_ocp_base(tp); strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE); dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version); } @@ -6584,6 +6599,21 @@ static bool rtl8153_in_nway(struct r8152 *tp) return true; } +static void r8156_mdio_force_mode(struct r8152 *tp) +{ + u16 data; + + /* Select force mode through 0xa5b4 bit 15 + * 0: MDIO force mode + * 1: MMD force mode + */ + data = ocp_reg_read(tp, 0xa5b4); + if (data & BIT(15)) { + data &= ~BIT(15); + ocp_reg_write(tp, 0xa5b4, data); + } +} + static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; @@ -8016,6 +8046,7 @@ static void r8156_init(struct r8152 *tp) ocp_data |= ACT_ODMA; ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); + r8156_mdio_force_mode(tp); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ @@ -8145,6 +8176,7 @@ static void r8156b_init(struct r8152 *tp) ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); + r8156_mdio_force_mode(tp); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ @@ -8467,6 +8499,8 @@ static int rtl8152_resume(struct usb_interface *intf) 
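The common thread in this r8152 series: tp->ocp_base caches whatever was last written to the PLA_OCP_GPHY_BASE window register, and after PHY firmware loading, reset or resume that cache can no longer be trusted to match the hardware. The helper added above writes a value no real window can have, so the next OCP access reprograms the base before use:

    static inline void rtl_reset_ocp_base(struct r8152 *tp)
    {
            tp->ocp_base = -1;      /* never a valid base; the next
                                     * ocp_reg_read/write rewrites
                                     * PLA_OCP_GPHY_BASE first */
    }

The resume hunk that follows applies exactly this before touching any PHY register.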
mutex_lock(&tp->control); + rtl_reset_ocp_base(tp); + if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) ret = rtl8152_runtime_resume(tp); else @@ -8482,6 +8516,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf) struct r8152 *tp = usb_get_intfdata(intf); clear_bit(SELECTIVE_SUSPEND, &tp->flags); + rtl_reset_ocp_base(tp); tp->rtl_ops.init(tp); queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp, true); @@ -8983,7 +9018,9 @@ static int rtl8152_set_tunable(struct net_device *netdev, } static void rtl8152_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); @@ -8992,7 +9029,9 @@ static void rtl8152_get_ringparam(struct net_device *netdev, } static int rtl8152_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct r8152 *tp = netdev_priv(netdev); diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 4a84f90e377c..247f58cb0f84 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -609,6 +609,11 @@ static const struct usb_device_id products [] = { USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_poll_status_info, }, { + /* Hytera Communications DMR radios' "Radio to PC Network" */ + USB_VENDOR_AND_INTERFACE_INFO(0x238b, + USB_CLASS_COMM, 2 /* ACM */, 0x0ff), + .driver_info = (unsigned long)&rndis_info, +}, { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_info, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 20fe4cd8f784..abe0149ed917 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1050,6 +1050,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_set_features = smsc95xx_set_features, }; +static void smsc95xx_handle_link_change(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + phy_print_status(net->phydev); + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); +} + static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; @@ -1154,6 +1162,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->min_mtu = ETH_MIN_MTU; dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + ret = phy_connect_direct(dev->net, pdata->phydev, + &smsc95xx_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id); + goto unregister_mdio; + } + + phy_attached_info(dev->net->phydev); + return 0; unregister_mdio: @@ -1171,47 +1190,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = dev->driver_priv; + phy_disconnect(dev->net->phydev); mdiobus_unregister(pdata->mdiobus); mdiobus_free(pdata->mdiobus); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); } -static void smsc95xx_handle_link_change(struct net_device *net) -{ - struct usbnet *dev = netdev_priv(net); - - phy_print_status(net->phydev); - usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); -} - static int smsc95xx_start_phy(struct usbnet *dev) { - struct smsc95xx_priv *pdata = dev->driver_priv; - 
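The smsc95xx reshuffle around this point moves PHY attachment from the start path to bind time, which yields the usual phylib pairing; as a sketch of the resulting lifecycle (matching the driver_info hooks further below, not new driver code):

    /* bind:   phy_connect_direct() + phy_attached_info()   attach once
     * reset:  phy_start()                                  each open
     * stop:   phy_stop() (guarded by a phydev check)       each close
     * unbind: phy_disconnect()                             detach once */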
struct net_device *net = dev->net; - int ret; + phy_start(dev->net->phydev); - ret = smsc95xx_reset(dev); - if (ret < 0) - return ret; - - ret = phy_connect_direct(net, pdata->phydev, - &smsc95xx_handle_link_change, - PHY_INTERFACE_MODE_MII); - if (ret) { - netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id); - return ret; - } - - phy_attached_info(net->phydev); - phy_start(net->phydev); return 0; } -static int smsc95xx_disconnect_phy(struct usbnet *dev) +static int smsc95xx_stop(struct usbnet *dev) { - phy_stop(dev->net->phydev); - phy_disconnect(dev->net->phydev); + if (dev->net->phydev) + phy_stop(dev->net->phydev); + return 0; } @@ -1966,7 +1963,7 @@ static const struct driver_info smsc95xx_info = { .unbind = smsc95xx_unbind, .link_reset = smsc95xx_link_reset, .reset = smsc95xx_start_phy, - .stop = smsc95xx_disconnect_phy, + .stop = smsc95xx_stop, .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 50eb43e5bf45..354a963075c5 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -134,29 +134,22 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { - char *p = (char *)buf; + u8 *p = buf; int i, j; switch(stringset) { case ETH_SS_STATS: memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); p += sizeof(ethtool_stats_keys); - for (i = 0; i < dev->real_num_rx_queues; i++) { - for (j = 0; j < VETH_RQ_STATS_LEN; j++) { - snprintf(p, ETH_GSTRING_LEN, - "rx_queue_%u_%.18s", - i, veth_rq_stats_desc[j].desc); - p += ETH_GSTRING_LEN; - } - } - for (i = 0; i < dev->real_num_tx_queues; i++) { - for (j = 0; j < VETH_TQ_STATS_LEN; j++) { - snprintf(p, ETH_GSTRING_LEN, - "tx_queue_%u_%.18s", - i, veth_tq_stats_desc[j].desc); - p += ETH_GSTRING_LEN; - } - } + for (i = 0; i < dev->real_num_rx_queues; i++) + for (j = 0; j < VETH_RQ_STATS_LEN; j++) + ethtool_sprintf(&p, "rx_queue_%u_%.18s", + i, veth_rq_stats_desc[j].desc); + + for (i = 0; i < dev->real_num_tx_queues; i++) + for (j = 0; j < VETH_TQ_STATS_LEN; j++) + ethtool_sprintf(&p, "tx_queue_%u_%.18s", + i, veth_tq_stats_desc[j].desc); break; } } @@ -342,7 +335,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) */ use_napi = rcu_access_pointer(rq->napi) && veth_skb_is_eligible_for_gro(dev, rcv, skb); - skb_record_rx_queue(skb, rxq); } skb_tx_timestamp(skb); @@ -651,7 +643,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq, rcu_read_unlock(); goto xdp_xmit; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); @@ -801,7 +793,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, rcu_read_unlock(); goto xdp_xmit; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); @@ -879,8 +871,12 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, stats->xdp_bytes += skb->len; skb = veth_xdp_rcv_skb(rq, skb, bq, stats); - if (skb) - napi_gro_receive(&rq->xdp_napi, skb); + if (skb) { + if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC)) + netif_receive_skb(skb); + else + napi_gro_receive(&rq->xdp_napi, skb); + } } done++; } @@ -1689,8 +1685,8 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (ifmp && (dev->ifindex != 0))
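/*
 * Editor's note, a sketch: ethtool_sprintf() formats one entry into the
 * ethtool string table and advances the cursor by ETH_GSTRING_LEN itself,
 * which is what lets the veth conversion above drop the explicit
 * snprintf()/"p += ETH_GSTRING_LEN" pairs and their braces. Minimal usage:
 */
#include <linux/ethtool.h>

static void example_fill_queue_strings(u8 *buf, unsigned int nqueues)
{
	u8 *p = buf;
	unsigned int i;

	for (i = 0; i < nqueues; i++)
		ethtool_sprintf(&p, "rx_queue_%u_packets", i);
	/* p now sits nqueues * ETH_GSTRING_LEN bytes past buf */
}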
peer->ifindex = ifmp->ifi_index; - peer->gso_max_size = dev->gso_max_size; - peer->gso_max_segs = dev->gso_max_segs; + netif_set_gso_max_size(peer, dev->gso_max_size); + netif_set_gso_max_segs(peer, dev->gso_max_segs); err = register_netdevice(peer); put_net(net); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1771d6e5224f..569eecfbc2cd 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -733,7 +733,7 @@ static struct sk_buff *receive_small(struct net_device *dev, pr_debug("%s: rx error: len %u exceeds max size %d\n", dev->name, len, GOOD_PACKET_LEN); dev->stats.rx_length_errors++; - goto err_len; + goto err; } if (likely(!vi->xdp_enabled)) { @@ -812,7 +812,7 @@ static struct sk_buff *receive_small(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); @@ -825,10 +825,8 @@ static struct sk_buff *receive_small(struct net_device *dev, skip_xdp: skb = build_skb(buf, buflen); - if (!skb) { - put_page(page); + if (!skb) goto err; - } skb_reserve(skb, headroom - delta); skb_put(skb, len); if (!xdp_prog) { @@ -839,13 +837,12 @@ skip_xdp: if (metasize) skb_metadata_set(skb, metasize); -err: return skb; err_xdp: rcu_read_unlock(); stats->xdp_drops++; -err_len: +err: stats->drops++; put_page(page); xdp_xmit: @@ -1025,7 +1022,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(vi->dev, xdp_prog, act); @@ -2174,7 +2171,9 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi) } static void virtnet_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct virtnet_info *vi = netdev_priv(dev); @@ -2694,7 +2693,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", txqueue, sq->name, sq->vq->index, sq->vq->name, - jiffies_to_usecs(jiffies - txq->trans_start)); + jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); } static const struct net_device_ops virtnet_netdev = { @@ -3423,7 +3422,6 @@ static struct virtio_driver virtio_net_driver = { .feature_table_size = ARRAY_SIZE(features), .feature_table_legacy = features_legacy, .feature_table_size_legacy = ARRAY_SIZE(features_legacy), - .suppress_used_validation = true, .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 14fae317bc70..d9d90baac72a 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3159,14 +3159,14 @@ out: static void -vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) +vmxnet3_declare_features(struct vmxnet3_adapter *adapter) { struct net_device *netdev = adapter->netdev; netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_LRO; + NETIF_F_LRO | NETIF_F_HIGHDMA; if (VMXNET3_VERSION_GE_4(adapter)) { netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | @@ -3179,8 +3179,6 @@ 
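/*
 * Editor's note, a sketch: bpf_warn_invalid_xdp_action() grew two arguments
 * (the netdev and the offending bpf_prog) so its one-shot warning can name
 * which device and which program returned an unknown verdict, matching what
 * trace_xdp_exception() already reports. The canonical verdict switch in a
 * driver RX path now reads:
 */
static u32 example_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			   struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_REDIRECT:
		break;
	default:
		bpf_warn_invalid_xdp_action(dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, act);
		fallthrough;
	case XDP_DROP:
		break;
	}
	return act;
}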
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) NETIF_F_GSO_UDP_TUNNEL_CSUM; } - if (dma64) - netdev->hw_features |= NETIF_F_HIGHDMA; netdev->vlan_features = netdev->hw_features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); @@ -3261,7 +3259,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) #ifdef CONFIG_PCI_MSI if (adapter->intr.type == VMXNET3_IT_MSIX) { - int i, nvec; + int i, nvec, nvec_allocated; nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ? 1 : adapter->num_tx_queues; @@ -3274,14 +3272,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) for (i = 0; i < nvec; i++) adapter->intr.msix_entries[i].entry = i; - nvec = vmxnet3_acquire_msix_vectors(adapter, nvec); - if (nvec < 0) + nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec); + if (nvec_allocated < 0) goto msix_err; /* If we cannot allocate one MSIx vector per queue * then limit the number of rx queues to 1 */ - if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) { + if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT && + nvec != VMXNET3_LINUX_MIN_MSIX_VECT) { if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE || adapter->num_rx_queues != 1) { adapter->share_intr = VMXNET3_INTR_TXSHARE; @@ -3291,14 +3290,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) } } - adapter->intr.num_intrs = nvec; + adapter->intr.num_intrs = nvec_allocated; return; msix_err: /* If we cannot allocate MSIx vectors use only one rx queue */ dev_info(&adapter->pdev->dev, "Failed to enable MSI-X, error %d. " - "Limiting #rx queues to 1, try MSI.\n", nvec); + "Limiting #rx queues to 1, try MSI.\n", nvec_allocated); adapter->intr.type = VMXNET3_IT_MSI; } @@ -3396,7 +3395,6 @@ vmxnet3_probe_device(struct pci_dev *pdev, #endif }; int err; - bool dma64; u32 ver; struct net_device *netdev; struct vmxnet3_adapter *adapter; @@ -3438,15 +3436,10 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) { - dma64 = true; - } else { - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "dma_set_mask failed\n"); - goto err_set_mask; - } - dma64 = false; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "dma_set_mask failed\n"); + goto err_set_mask; } spin_lock_init(&adapter->cmd_lock); @@ -3613,7 +3606,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, } SET_NETDEV_DEV(netdev, &pdev->dev); - vmxnet3_declare_features(adapter, dma64); + vmxnet3_declare_features(adapter); adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? 
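/*
 * Editor's note, a sketch of the rationale behind the vmxnet3 DMA hunk
 * above: on current kernels a 64-bit dma_set_mask_and_coherent() request
 * does not fail on platforms that can only address 32 bits (the DMA core
 * narrows the mask internally), so the old 64-then-32 retry and the dma64
 * flag it fed into vmxnet3_declare_features() were dead weight, and
 * NETIF_F_HIGHDMA can be advertised unconditionally. The probe-time dance
 * collapses to a single call:
 */
static int example_setup_dma(struct pci_dev *pdev)
{
	/* one request; no 32-bit fallback branch needed anymore */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
}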
VMXNET3_DEF_RXDATA_DESC_SIZE : 0; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 16f3a2057b90..3172d46c0335 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -575,10 +575,11 @@ vmxnet3_get_link_ksettings(struct net_device *netdev, return 0; } - static void vmxnet3_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); @@ -595,10 +596,11 @@ vmxnet3_get_ringparam(struct net_device *netdev, param->rx_jumbo_pending = adapter->rx_ring2_size; } - static int vmxnet3_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ccf677015d5b..e0b1ab99a359 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -34,6 +34,7 @@ #include <net/addrconf.h> #include <net/l3mdev.h> #include <net/fib_rules.h> +#include <net/sch_generic.h> #include <net/netns/generic.h> #include <net/netfilter/nf_conntrack.h> @@ -497,6 +498,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, /* strip the ethernet header added for pass through VRF device */ __skb_pull(skb, skb_network_offset(skb)); + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); ret = vrf_ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(ret))) dev->stats.tx_errors++; @@ -579,6 +581,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, RT_SCOPE_LINK); } + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); if (unlikely(net_xmit_eval(ret))) vrf_dev->stats.tx_errors++; @@ -768,8 +771,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; - vrf_nf_set_untracked(skb); - err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip6_out_direct_finish); @@ -790,6 +791,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) return skb; + vrf_nf_set_untracked(skb); + if (qdisc_tx_is_default(vrf_dev) || IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return vrf_ip6_out_direct(vrf_dev, sk, skb); @@ -812,9 +815,9 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) */ if (rt6) { dst = &rt6->dst; - dev_put(dst->dev); + dev_replace_track(dst->dev, net->loopback_dev, + &dst->dev_tracker, GFP_KERNEL); dst->dev = net->loopback_dev; - dev_hold(dst->dev); dst_release(dst); } } @@ -998,8 +1001,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, skb->dev = vrf_dev; - vrf_nf_set_untracked(skb); - err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, vrf_dev, vrf_ip_out_direct_finish); @@ -1021,6 +1022,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; + vrf_nf_set_untracked(skb); + if (qdisc_tx_is_default(vrf_dev) || IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return vrf_ip_out_direct(vrf_dev, sk, skb); @@ -1059,9 +1062,9 @@ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) */ if (rth) { dst = &rth->dst; - dev_put(dst->dev); + 
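/*
 * Editor's note, a sketch: dev_replace_track() comes from the netdev
 * reference-tracking infrastructure. It releases the tracked reference on
 * the old device and takes a tracked one on the new device in a single
 * step, so a leaked refcount can be attributed to the exact holder. The
 * vrf hunks here use it to retarget a cached dst at the loopback device,
 * replacing the old open-coded dev_put()/dev_hold() pair:
 */
static void example_retarget_dst(struct dst_entry *dst, struct net *net)
{
	dev_replace_track(dst->dev, net->loopback_dev,
			  &dst->dev_tracker, GFP_KERNEL);
	dst->dev = net->loopback_dev;
}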
dev_replace_track(dst->dev, net->loopback_dev, + &dst->dev_tracker, GFP_KERNEL); dst->dev = net->loopback_dev; - dev_hold(dst->dev); dst_release(dst); } } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 141635a35c28..359d16780dbb 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -17,6 +17,7 @@ #include <linux/ethtool.h> #include <net/arp.h> #include <net/ndisc.h> +#include <net/gro.h> #include <net/ipv6_stubs.h> #include <net/ip.h> #include <net/icmp.h> @@ -3233,7 +3234,6 @@ static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_fdb_dump = vxlan_fdb_dump, .ndo_fdb_get = vxlan_fdb_get, .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, - .ndo_change_proto_down = dev_change_proto_down_generic, }; static const struct net_device_ops vxlan_netdev_raw_ops = { @@ -3304,7 +3304,7 @@ static void vxlan_setup(struct net_device *dev) dev->hw_features |= NETIF_F_RXCSUM; dev->hw_features |= NETIF_F_GSO_SOFTWARE; netif_keep_dst(dev); - dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_NO_QUEUE | IFF_CHANGE_PROTO_DOWN; /* MTU range: 68 - 65535 */ dev->min_mtu = ETH_MIN_MTU; @@ -3747,7 +3747,7 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, if (!conf->dst_port) { if (conf->flags & VXLAN_F_GPE) - conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */ + conf->dst_port = htons(IANA_VXLAN_GPE_UDP_PORT); else conf->dst_port = htons(vxlan_port); } @@ -3810,8 +3810,8 @@ static void vxlan_config_apply(struct net_device *dev, if (lowerdev) { dst->remote_ifindex = conf->remote_ifindex; - dev->gso_max_size = lowerdev->gso_max_size; - dev->gso_max_segs = lowerdev->gso_max_segs; + netif_set_gso_max_size(dev, lowerdev->gso_max_size); + netif_set_gso_max_segs(dev, lowerdev->gso_max_segs); needed_headroom = lowerdev->hard_header_len; needed_headroom += lowerdev->needed_headroom; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index cda1b4ce6b21..5ae2d27b5da9 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -306,9 +306,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) else bd_status = R_E_S | R_I_S | R_W_S; - iowrite16be(bd_status, &priv->rx_bd_base[i].status); - iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, - &priv->rx_bd_base[i].buf); + priv->rx_bd_base[i].status = cpu_to_be16(bd_status); + priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH); } for (i = 0; i < TX_BD_RING_LEN; i++) { @@ -317,10 +316,10 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) else bd_status = T_I_S | T_TC_S | T_W_S; - iowrite16be(bd_status, &priv->tx_bd_base[i].status); - iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, - &priv->tx_bd_base[i].buf); + priv->tx_bd_base[i].status = cpu_to_be16(bd_status); + priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH); } + dma_wmb(); return 0; @@ -352,10 +351,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv; - struct qe_bd __iomem *bd; + struct qe_bd *bd; u16 bd_status; unsigned long flags; - u16 *proto_head; + __be16 *proto_head; switch (dev->type) { case ARPHRD_RAWHDLC: @@ -368,14 +367,14 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) skb_push(skb, HDLC_HEAD_LEN); - proto_head = (u16 *)skb->data; + proto_head = (__be16 *)skb->data; *proto_head = htons(DEFAULT_HDLC_HEAD); dev->stats.tx_bytes += skb->len; 
break; case ARPHRD_PPP: - proto_head = (u16 *)skb->data; + proto_head = (__be16 *)skb->data; if (*proto_head != htons(DEFAULT_PPP_HEAD)) { dev->stats.tx_dropped++; dev_kfree_skb(skb); @@ -398,9 +397,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) netdev_sent_queue(dev, skb->len); spin_lock_irqsave(&priv->lock, flags); + dma_rmb(); /* Start from the next BD that should be filled */ bd = priv->curtx_bd; - bd_status = ioread16be(&bd->status); + bd_status = be16_to_cpu(bd->status); /* Save the skb pointer so we can free it later */ priv->tx_skbuff[priv->skb_curtx] = skb; @@ -415,8 +415,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) /* set bd status and length */ bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S; - iowrite16be(skb->len, &bd->length); - iowrite16be(bd_status, &bd->status); + bd->length = cpu_to_be16(skb->len); + bd->status = cpu_to_be16(bd_status); /* Move to next BD in the ring */ if (!(bd_status & T_W_S)) @@ -458,8 +458,9 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) u16 bd_status; int tx_restart = 0; + dma_rmb(); bd = priv->dirty_tx; - bd_status = ioread16be(&bd->status); + bd_status = be16_to_cpu(bd->status); /* Normal processing. */ while ((bd_status & T_R_S) == 0) { @@ -503,7 +504,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) bd += 1; else bd = priv->tx_bd_base; - bd_status = ioread16be(&bd->status); + bd_status = be16_to_cpu(bd->status); } priv->dirty_tx = bd; @@ -524,8 +525,9 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) u16 length, howmany = 0; u8 *bdbuffer; + dma_rmb(); bd = priv->currx_bd; - bd_status = ioread16be(&bd->status); + bd_status = be16_to_cpu(bd->status); /* while there are received buffers and BD is full (~R_E) */ while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) { @@ -549,7 +551,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) } bdbuffer = priv->rx_buffer + (priv->currx_bdnum * MAX_RX_BUF_LENGTH); - length = ioread16be(&bd->length); + length = be16_to_cpu(bd->length); switch (dev->type) { case ARPHRD_RAWHDLC: @@ -593,7 +595,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) netif_receive_skb(skb); recycle: - iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status); + bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S); /* update to point at the next bd */ if (bd_status & R_W_S) { @@ -608,8 +610,9 @@ recycle: bd += 1; } - bd_status = ioread16be(&bd->status); + bd_status = be16_to_cpu(bd->status); } + dma_rmb(); priv->currx_bd = bd; return howmany; @@ -721,7 +724,7 @@ static int uhdlc_open(struct net_device *dev) /* Enable the TDM port */ if (priv->tsa) - utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); + qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port); priv->hdlc_busy = 1; netif_device_attach(priv->ndev); @@ -812,7 +815,7 @@ static int uhdlc_close(struct net_device *dev) (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); if (priv->tsa) - utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port); + qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port); ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); @@ -848,7 +851,7 @@ static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, #ifdef CONFIG_PM static void store_clk_config(struct ucc_hdlc_private *priv) { - struct qe_mux *qe_mux_reg = &qe_immr->qmx; + struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx; /* store si clk */ priv->cmxsi1cr_h = 
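/*
 * Editor's note, a sketch: the fsl_ucc_hdlc descriptor rings live in
 * coherent DMA memory rather than MMIO, so iowrite16be()/ioread16be() were
 * the wrong accessors; the patch switches to plain cpu_to_be16()/
 * be16_to_cpu() stores and loads, ordered by explicit dma_wmb()/dma_rmb()
 * fences. The generic publish pattern (field names as in struct qe_bd
 * above, flag choice illustrative): fill the descriptor, fence, then hand
 * ownership to the device last:
 */
static void example_publish_tx_bd(struct qe_bd *bd, dma_addr_t buf, u16 len,
				  u16 flags)
{
	bd->buf = cpu_to_be32(buf);
	bd->length = cpu_to_be16(len);
	dma_wmb();	/* buf/length visible before the device owns the BD */
	bd->status = cpu_to_be16(flags | T_R_S);
}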
ioread32be(&qe_mux_reg->cmxsi1cr_h); @@ -863,7 +866,7 @@ static void store_clk_config(struct ucc_hdlc_private *priv) static void resume_clk_config(struct ucc_hdlc_private *priv) { - struct qe_mux *qe_mux_reg = &qe_immr->qmx; + struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx; memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32)); @@ -990,9 +993,8 @@ static int uhdlc_resume(struct device *dev) else bd_status = R_E_S | R_I_S | R_W_S; - iowrite16be(bd_status, &priv->rx_bd_base[i].status); - iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, - &priv->rx_bd_base[i].buf); + priv->rx_bd_base[i].status = cpu_to_be16(bd_status); + priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH); } for (i = 0; i < TX_BD_RING_LEN; i++) { @@ -1001,10 +1003,10 @@ static int uhdlc_resume(struct device *dev) else bd_status = T_I_S | T_TC_S | T_W_S; - iowrite16be(bd_status, &priv->tx_bd_base[i].status); - iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, - &priv->tx_bd_base[i].buf); + priv->tx_bd_base[i].status = cpu_to_be16(bd_status); + priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH); } + dma_wmb(); /* if hdlc is busy enable TX and RX */ if (priv->hdlc_busy == 1) { @@ -1018,7 +1020,7 @@ static int uhdlc_resume(struct device *dev) /* Enable the TDM port */ if (priv->tsa) - utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); + qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port); } napi_enable(&priv->napi); diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 88a36a069311..0b7d9f2f2b8b 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -17,13 +17,19 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/platform_device.h> -#include <linux/platform_data/wan_ixp4xx_hss.h> #include <linux/poll.h> #include <linux/slab.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> #include <linux/soc/ixp4xx/npe.h> #include <linux/soc/ixp4xx/qmgr.h> #include <linux/soc/ixp4xx/cpu.h> +/* This is what all IXP4xx platforms we know uses, if more frequencies + * are needed, we need to migrate to the clock framework. 
+ */ +#define IXP4XX_TIMER_FREQ 66666000 + #define DEBUG_DESC 0 #define DEBUG_RX 0 #define DEBUG_TX 0 @@ -50,7 +56,6 @@ #define NAPI_WEIGHT 16 /* Queue IDs */ -#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */ #define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */ #define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */ #define HSS0_PKT_TX1_QUEUE 15 @@ -62,7 +67,6 @@ #define HSS0_PKT_RXFREE3_QUEUE 21 #define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */ -#define HSS1_CHL_RXTRIG_QUEUE 10 #define HSS1_PKT_RX_QUEUE 0 #define HSS1_PKT_TX0_QUEUE 5 #define HSS1_PKT_TX1_QUEUE 6 @@ -252,9 +256,19 @@ typedef void buffer_t; struct port { struct device *dev; struct npe *npe; + unsigned int txreadyq; + unsigned int rxtrigq; + unsigned int rxfreeq; + unsigned int rxq; + unsigned int txq; + unsigned int txdoneq; + struct gpio_desc *cts; + struct gpio_desc *rts; + struct gpio_desc *dcd; + struct gpio_desc *dtr; + struct gpio_desc *clk_internal; struct net_device *netdev; struct napi_struct napi; - struct hss_plat_info *plat; buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; struct desc *desc_tab; /* coherent */ dma_addr_t desc_tab_phys; @@ -322,14 +336,6 @@ static int ports_open; static struct dma_pool *dma_pool; static DEFINE_SPINLOCK(npe_lock); -static const struct { - int tx, txdone, rx, rxfree; -} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE, - HSS0_PKT_RXFREE0_QUEUE}, - {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE, - HSS1_PKT_RXFREE0_QUEUE}, -}; - /***************************************************************************** * utility functions ****************************************************************************/ @@ -645,7 +651,7 @@ static void hss_hdlc_rx_irq(void *pdev) #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); #endif - qmgr_disable_irq(queue_ids[port->id].rx); + qmgr_disable_irq(port->rxq); napi_schedule(&port->napi); } @@ -653,8 +659,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget) { struct port *port = container_of(napi, struct port, napi); struct net_device *dev = port->netdev; - unsigned int rxq = queue_ids[port->id].rx; - unsigned int rxfreeq = queue_ids[port->id].rxfree; + unsigned int rxq = port->rxq; + unsigned int rxfreeq = port->rxfreeq; int received = 0; #if DEBUG_RX @@ -795,7 +801,7 @@ static void hss_hdlc_txdone_irq(void *pdev) #if DEBUG_TX printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n"); #endif - while ((n_desc = queue_get_desc(queue_ids[port->id].txdone, + while ((n_desc = queue_get_desc(port->txdoneq, port, 1)) >= 0) { struct desc *desc; int start; @@ -813,8 +819,8 @@ static void hss_hdlc_txdone_irq(void *pdev) free_buffer_irq(port->tx_buff_tab[n_desc]); port->tx_buff_tab[n_desc] = NULL; - start = qmgr_stat_below_low_watermark(port->plat->txreadyq); - queue_put_desc(port->plat->txreadyq, + start = qmgr_stat_below_low_watermark(port->txreadyq); + queue_put_desc(port->txreadyq, tx_desc_phys(port, n_desc), desc); if (start) { /* TX-ready queue was empty */ #if DEBUG_TX @@ -829,7 +835,7 @@ static void hss_hdlc_txdone_irq(void *pdev) static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) { struct port *port = dev_to_port(dev); - unsigned int txreadyq = port->plat->txreadyq; + unsigned int txreadyq = port->txreadyq; int len, offset, bytes, n; void *mem; u32 phys; @@ -889,7 +895,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) desc->buf_len = desc->pkt_len = len; wmb(); - queue_put_desc(queue_ids[port->id].tx, 
tx_desc_phys(port, n), desc); + queue_put_desc(port->txq, tx_desc_phys(port, n), desc); if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ #if DEBUG_TX @@ -916,40 +922,40 @@ static int request_hdlc_queues(struct port *port) { int err; - err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0, + err = qmgr_request_queue(port->rxfreeq, RX_DESCS, 0, 0, "%s:RX-free", port->netdev->name); if (err) return err; - err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0, + err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0, "%s:RX", port->netdev->name); if (err) goto rel_rxfree; - err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0, + err = qmgr_request_queue(port->txq, TX_DESCS, 0, 0, "%s:TX", port->netdev->name); if (err) goto rel_rx; - err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, + err = qmgr_request_queue(port->txreadyq, TX_DESCS, 0, 0, "%s:TX-ready", port->netdev->name); if (err) goto rel_tx; - err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0, + err = qmgr_request_queue(port->txdoneq, TX_DESCS, 0, 0, "%s:TX-done", port->netdev->name); if (err) goto rel_txready; return 0; rel_txready: - qmgr_release_queue(port->plat->txreadyq); + qmgr_release_queue(port->txreadyq); rel_tx: - qmgr_release_queue(queue_ids[port->id].tx); + qmgr_release_queue(port->txq); rel_rx: - qmgr_release_queue(queue_ids[port->id].rx); + qmgr_release_queue(port->rxq); rel_rxfree: - qmgr_release_queue(queue_ids[port->id].rxfree); + qmgr_release_queue(port->rxfreeq); printk(KERN_DEBUG "%s: unable to request hardware queues\n", port->netdev->name); return err; @@ -957,11 +963,11 @@ rel_rxfree: static void release_hdlc_queues(struct port *port) { - qmgr_release_queue(queue_ids[port->id].rxfree); - qmgr_release_queue(queue_ids[port->id].rx); - qmgr_release_queue(queue_ids[port->id].txdone); - qmgr_release_queue(queue_ids[port->id].tx); - qmgr_release_queue(port->plat->txreadyq); + qmgr_release_queue(port->rxfreeq); + qmgr_release_queue(port->rxq); + qmgr_release_queue(port->txdoneq); + qmgr_release_queue(port->txq); + qmgr_release_queue(port->txreadyq); } static int init_hdlc_queues(struct port *port) @@ -1046,11 +1052,24 @@ static void destroy_hdlc_queues(struct port *port) } } +static irqreturn_t hss_hdlc_dcd_irq(int irq, void *data) +{ + struct net_device *dev = data; + struct port *port = dev_to_port(dev); + int val; + + val = gpiod_get_value(port->dcd); + hss_hdlc_set_carrier(dev, val); + + return IRQ_HANDLED; +} + static int hss_hdlc_open(struct net_device *dev) { struct port *port = dev_to_port(dev); unsigned long flags; int i, err = 0; + int val; err = hdlc_open(dev); if (err) @@ -1069,32 +1088,44 @@ static int hss_hdlc_open(struct net_device *dev) goto err_destroy_queues; spin_lock_irqsave(&npe_lock, flags); - if (port->plat->open) { - err = port->plat->open(port->id, dev, hss_hdlc_set_carrier); - if (err) - goto err_unlock; + + /* Set the carrier, the GPIO is flagged active low so this will return + * 1 if DCD is asserted. 
+ */ + val = gpiod_get_value(port->dcd); + hss_hdlc_set_carrier(dev, val); + + /* Set up an IRQ for DCD */ + err = request_irq(gpiod_to_irq(port->dcd), hss_hdlc_dcd_irq, 0, "IXP4xx HSS", dev); + if (err) { + dev_err(&dev->dev, "ixp4xx_hss: failed to request DCD IRQ (%i)\n", err); + goto err_unlock; } + /* GPIOs are flagged active low so this asserts DTR and RTS */ + gpiod_set_value(port->dtr, 1); + gpiod_set_value(port->rts, 1); + spin_unlock_irqrestore(&npe_lock, flags); /* Populate queues with buffers, no failure after this point */ for (i = 0; i < TX_DESCS; i++) - queue_put_desc(port->plat->txreadyq, + queue_put_desc(port->txreadyq, tx_desc_phys(port, i), tx_desc_ptr(port, i)); for (i = 0; i < RX_DESCS; i++) - queue_put_desc(queue_ids[port->id].rxfree, + queue_put_desc(port->rxfreeq, rx_desc_phys(port, i), rx_desc_ptr(port, i)); napi_enable(&port->napi); netif_start_queue(dev); - qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY, + qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, hss_hdlc_rx_irq, dev); - qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY, + qmgr_set_irq(port->txdoneq, QUEUE_IRQ_SRC_NOT_EMPTY, hss_hdlc_txdone_irq, dev); - qmgr_enable_irq(queue_ids[port->id].txdone); + qmgr_enable_irq(port->txdoneq); ports_open++; @@ -1125,15 +1156,15 @@ static int hss_hdlc_close(struct net_device *dev) spin_lock_irqsave(&npe_lock, flags); ports_open--; - qmgr_disable_irq(queue_ids[port->id].rx); + qmgr_disable_irq(port->rxq); netif_stop_queue(dev); napi_disable(&port->napi); hss_stop_hdlc(port); - while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0) + while (queue_get_desc(port->rxfreeq, port, 0) >= 0) buffs--; - while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0) + while (queue_get_desc(port->rxq, port, 0) >= 0) buffs--; if (buffs) @@ -1141,12 +1172,12 @@ static int hss_hdlc_close(struct net_device *dev) buffs); buffs = TX_DESCS; - while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0) + while (queue_get_desc(port->txq, port, 1) >= 0) buffs--; /* cancel TX */ i = 0; do { - while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0) + while (queue_get_desc(port->txreadyq, port, 1) >= 0) buffs--; if (!buffs) break; @@ -1159,10 +1190,12 @@ static int hss_hdlc_close(struct net_device *dev) if (!buffs) printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i); #endif - qmgr_disable_irq(queue_ids[port->id].txdone); + qmgr_disable_irq(port->txdoneq); - if (port->plat->close) - port->plat->close(port->id, dev); + free_irq(gpiod_to_irq(port->dcd), dev); + /* GPIOs are flagged active low so this de-asserts DTR and RTS */ + gpiod_set_value(port->dtr, 0); + gpiod_set_value(port->rts, 0); spin_unlock_irqrestore(&npe_lock, flags); destroy_hdlc_queues(port); @@ -1254,6 +1287,21 @@ static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg) } } +static int hss_hdlc_set_clock(struct port *port, unsigned int clock_type) +{ + switch (clock_type) { + case CLOCK_DEFAULT: + case CLOCK_EXT: + gpiod_set_value(port->clk_internal, 0); + return CLOCK_EXT; + case CLOCK_INT: + gpiod_set_value(port->clk_internal, 1); + return CLOCK_INT; + default: + return -EINVAL; + } +} + static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) { const size_t size = sizeof(sync_serial_settings); @@ -1286,8 +1334,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) return -EFAULT; clk = new_line.clock_type; - if (port->plat->set_clock) - clk = port->plat->set_clock(port->id, clk); + hss_hdlc_set_clock(port, clk); if (clk 
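/*
 * Editor's note, a sketch: because the HSS control lines are flagged
 * active-low in the device tree, gpiod_get_value() and gpiod_set_value()
 * work in logical state (1 == asserted) and the driver never sees raw
 * polarity; carrier detect becomes a plain GPIO input with an interrupt
 * derived from the descriptor. A self-contained version of the pattern,
 * with names and trigger flags that are illustrative, not from the patch:
 */
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int example_monitor_dcd(struct device *dev, irq_handler_t handler,
			       void *cookie)
{
	struct gpio_desc *dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN);
	int irq;

	if (IS_ERR(dcd))
		return PTR_ERR(dcd);
	irq = gpiod_to_irq(dcd);
	if (irq < 0)
		return irq;
	/* fire on both edges so carrier up and down are both observed */
	return request_irq(irq, handler,
			   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			   "example-dcd", cookie);
}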
!= CLOCK_EXT && clk != CLOCK_INT) return -EINVAL; /* No such clock setting */ @@ -1297,7 +1344,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs) port->clock_type = clk; /* Update settings */ if (clk == CLOCK_INT) { - find_best_clock(port->plat->timer_freq, + find_best_clock(IXP4XX_TIMER_FREQ, new_line.clock_rate, &port->clock_rate, &port->clock_reg); } else { @@ -1335,77 +1382,139 @@ static const struct net_device_ops hss_hdlc_ops = { .ndo_siocwandev = hss_hdlc_ioctl, }; -static int hss_init_one(struct platform_device *pdev) +static int ixp4xx_hss_probe(struct platform_device *pdev) { + struct of_phandle_args queue_spec; + struct of_phandle_args npe_spec; + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct device_node *np; struct port *port; - struct net_device *dev; hdlc_device *hdlc; int err; - port = kzalloc(sizeof(*port), GFP_KERNEL); + np = dev->of_node; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; - port->npe = npe_request(0); + err = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0, + &npe_spec); + if (err) + return dev_err_probe(dev, err, "no NPE engine specified\n"); + /* NPE ID 0x00, 0x10, 0x20... */ + port->npe = npe_request(npe_spec.args[0] << 4); if (!port->npe) { - err = -ENODEV; - goto err_free; + dev_err(dev, "unable to obtain NPE instance\n"); + return -ENODEV; } - dev = alloc_hdlcdev(port); + /* Get the TX ready queue as resource from queue manager */ + err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-txready", 1, 0, + &queue_spec); + if (err) + return dev_err_probe(dev, err, "no txready queue phandle\n"); + port->txreadyq = queue_spec.args[0]; + /* Get the RX trig queue as resource from queue manager */ + err = of_parse_phandle_with_fixed_args(np, "intel,queue-chl-rxtrig", 1, 0, + &queue_spec); + if (err) + return dev_err_probe(dev, err, "no rxtrig queue phandle\n"); + port->rxtrigq = queue_spec.args[0]; + /* Get the RX queue as resource from queue manager */ + err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rx", 1, 0, + &queue_spec); + if (err) + return dev_err_probe(dev, err, "no RX queue phandle\n"); + port->rxq = queue_spec.args[0]; + /* Get the TX queue as resource from queue manager */ + err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-tx", 1, 0, + &queue_spec); + if (err) + return dev_err_probe(dev, err, "no TX queue phandle\n"); + port->txq = queue_spec.args[0]; + /* Get the RX free queue as resource from queue manager */ + err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-rxfree", 1, 0, + &queue_spec); + if (err) + return dev_err_probe(dev, err, "no RX free queue phandle\n"); + port->rxfreeq = queue_spec.args[0]; + /* Get the TX done queue as resource from queue manager */ + err = of_parse_phandle_with_fixed_args(np, "intel,queue-pkt-txdone", 1, 0, + &queue_spec); + if (err) + return dev_err_probe(dev, err, "no TX done queue phandle\n"); + port->txdoneq = queue_spec.args[0]; + + /* Obtain all the line control GPIOs */ + port->cts = devm_gpiod_get(dev, "cts", GPIOD_OUT_LOW); + if (IS_ERR(port->cts)) + return dev_err_probe(dev, PTR_ERR(port->cts), "unable to get CTS GPIO\n"); + port->rts = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW); + if (IS_ERR(port->rts)) + return dev_err_probe(dev, PTR_ERR(port->rts), "unable to get RTS GPIO\n"); + port->dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN); + if (IS_ERR(port->dcd)) + return dev_err_probe(dev, PTR_ERR(port->dcd), "unable to get DCD GPIO\n"); + port->dtr = devm_gpiod_get(dev,
"dtr", GPIOD_OUT_LOW); + if (IS_ERR(port->dtr)) + return dev_err_probe(dev, PTR_ERR(port->dtr), "unable to get DTR GPIO\n"); + port->clk_internal = devm_gpiod_get(dev, "clk-internal", GPIOD_OUT_LOW); + if (IS_ERR(port->clk_internal)) + return dev_err_probe(dev, PTR_ERR(port->clk_internal), + "unable to get CLK internal GPIO\n"); + + ndev = alloc_hdlcdev(port); port->netdev = ndev; if (!port->netdev) { err = -ENOMEM; goto err_plat; } - SET_NETDEV_DEV(dev, &pdev->dev); - hdlc = dev_to_hdlc(dev); + SET_NETDEV_DEV(ndev, &pdev->dev); + hdlc = dev_to_hdlc(ndev); hdlc->attach = hss_hdlc_attach; hdlc->xmit = hss_hdlc_xmit; - dev->netdev_ops = &hss_hdlc_ops; - dev->tx_queue_len = 100; + ndev->netdev_ops = &hss_hdlc_ops; + ndev->tx_queue_len = 100; port->clock_type = CLOCK_EXT; port->clock_rate = 0; port->clock_reg = CLK42X_SPEED_2048KHZ; port->id = pdev->id; port->dev = &pdev->dev; - port->plat = pdev->dev.platform_data; - netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); + netif_napi_add(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); - err = register_hdlc_device(dev); + err = register_hdlc_device(ndev); if (err) goto err_free_netdev; platform_set_drvdata(pdev, port); - netdev_info(dev, "initialized\n"); + netdev_info(ndev, "initialized\n"); return 0; err_free_netdev: - free_netdev(dev); + free_netdev(ndev); err_plat: npe_release(port->npe); -err_free: - kfree(port); return err; } -static int hss_remove_one(struct platform_device *pdev) +static int ixp4xx_hss_remove(struct platform_device *pdev) { struct port *port = platform_get_drvdata(pdev); unregister_hdlc_device(port->netdev); free_netdev(port->netdev); npe_release(port->npe); - kfree(port); return 0; } static struct platform_driver ixp4xx_hss_driver = { .driver.name = DRV_NAME, - .probe = hss_init_one, - .remove = hss_remove_one, + .probe = ixp4xx_hss_probe, + .remove = ixp4xx_hss_remove, }; static int __init hss_init_module(void) diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index ed687bf6ec47..6a142dc85c37 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -550,7 +550,7 @@ static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, (timeout-- > 0)) cpu_relax(); - printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000-timeout); + printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout); for(pos = 0; pos < xc.len; pos++){ switch(data[pos]){ diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index b7197e80f226..9a4c8ff32d9d 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, return exact; } -static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node) +static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node) { node->parent_bit_packed = (unsigned long)parent | bit; rcu_assign_pointer(*parent, node); diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 551ddaaaf540..a46067c38bf5 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev) { struct wg_device *wg = netdev_priv(dev); struct wg_peer *peer; + struct sk_buff *skb; mutex_lock(&wg->device_update_lock); list_for_each_entry(peer,
&wg->peer_list, peer_list) { @@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev) wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); } mutex_unlock(&wg->device_update_lock); - skb_queue_purge(&wg->incoming_handshakes); + while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL) + kfree_skb(skb); + atomic_set(&wg->handshake_queue_len, 0); wg_socket_reinit(wg, NULL, NULL); return 0; } @@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev) destroy_workqueue(wg->handshake_receive_wq); destroy_workqueue(wg->handshake_send_wq); destroy_workqueue(wg->packet_crypt_wq); - wg_packet_queue_free(&wg->decrypt_queue); - wg_packet_queue_free(&wg->encrypt_queue); + wg_packet_queue_free(&wg->handshake_queue, true); + wg_packet_queue_free(&wg->decrypt_queue, false); + wg_packet_queue_free(&wg->encrypt_queue, false); rcu_barrier(); /* Wait for all the peers to be actually freed. */ wg_ratelimiter_uninit(); memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); - skb_queue_purge(&wg->incoming_handshakes); free_percpu(dev->tstats); - free_percpu(wg->incoming_handshakes_worker); kvfree(wg->index_hashtable); kvfree(wg->peer_hashtable); mutex_unlock(&wg->device_update_lock); @@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, init_rwsem(&wg->static_identity.lock); mutex_init(&wg->socket_update_lock); mutex_init(&wg->device_update_lock); - skb_queue_head_init(&wg->incoming_handshakes); wg_allowedips_init(&wg->peer_allowedips); wg_cookie_checker_init(&wg->cookie_checker, wg); INIT_LIST_HEAD(&wg->peer_list); @@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (!dev->tstats) goto err_free_index_hashtable; - wg->incoming_handshakes_worker = - wg_packet_percpu_multicore_worker_alloc( - wg_packet_handshake_receive_worker, wg); - if (!wg->incoming_handshakes_worker) - goto err_free_tstats; - wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); if (!wg->handshake_receive_wq) - goto err_free_incoming_handshakes; + goto err_free_tstats; wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); @@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (ret < 0) goto err_free_encrypt_queue; - ret = wg_ratelimiter_init(); + ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker, + MAX_QUEUED_INCOMING_HANDSHAKES); if (ret < 0) goto err_free_decrypt_queue; + ret = wg_ratelimiter_init(); + if (ret < 0) + goto err_free_handshake_queue; + ret = register_netdevice(dev); if (ret < 0) goto err_uninit_ratelimiter; @@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, err_uninit_ratelimiter: wg_ratelimiter_uninit(); +err_free_handshake_queue: + wg_packet_queue_free(&wg->handshake_queue, false); err_free_decrypt_queue: - wg_packet_queue_free(&wg->decrypt_queue); + wg_packet_queue_free(&wg->decrypt_queue, false); err_free_encrypt_queue: - wg_packet_queue_free(&wg->encrypt_queue); + wg_packet_queue_free(&wg->encrypt_queue, false); err_destroy_packet_crypt: destroy_workqueue(wg->packet_crypt_wq); err_destroy_handshake_send: destroy_workqueue(wg->handshake_send_wq); err_destroy_handshake_receive: destroy_workqueue(wg->handshake_receive_wq); -err_free_incoming_handshakes: - free_percpu(wg->incoming_handshakes_worker); err_free_tstats: free_percpu(dev->tstats); err_free_index_hashtable: @@ -398,6 +398,7 @@ static struct 
rtnl_link_ops link_ops __read_mostly = { static void wg_netns_pre_exit(struct net *net) { struct wg_device *wg; + struct wg_peer *peer; rtnl_lock(); list_for_each_entry(wg, &device_list, device_list) { @@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net) mutex_lock(&wg->device_update_lock); rcu_assign_pointer(wg->creating_net, NULL); wg_socket_reinit(wg, NULL, NULL); + list_for_each_entry(peer, &wg->peer_list, peer_list) + wg_socket_clear_peer_endpoint_src(peer); mutex_unlock(&wg->device_update_lock); } } diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h index 854bc3d97150..43c7cebbf50b 100644 --- a/drivers/net/wireguard/device.h +++ b/drivers/net/wireguard/device.h @@ -39,21 +39,18 @@ struct prev_queue { struct wg_device { struct net_device *dev; - struct crypt_queue encrypt_queue, decrypt_queue; + struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue; struct sock __rcu *sock4, *sock6; struct net __rcu *creating_net; struct noise_static_identity static_identity; - struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; - struct workqueue_struct *packet_crypt_wq; - struct sk_buff_head incoming_handshakes; - int incoming_handshake_cpu; - struct multicore_worker __percpu *incoming_handshakes_worker; + struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq; struct cookie_checker cookie_checker; struct pubkey_hashtable *peer_hashtable; struct index_hashtable *index_hashtable; struct allowedips peer_allowedips; struct mutex device_update_lock, socket_update_lock; struct list_head device_list, peer_list; + atomic_t handshake_queue_len; unsigned int num_peers, device_update_gen; u32 fwmark; u16 incoming_port; diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c index 75dbe77b0b4b..ee4da9ab8013 100644 --- a/drivers/net/wireguard/main.c +++ b/drivers/net/wireguard/main.c @@ -17,7 +17,7 @@ #include <linux/genetlink.h> #include <net/rtnetlink.h> -static int __init mod_init(void) +static int __init wg_mod_init(void) { int ret; @@ -60,7 +60,7 @@ err_allowedips: return ret; } -static void __exit mod_exit(void) +static void __exit wg_mod_exit(void) { wg_genetlink_uninit(); wg_device_uninit(); @@ -68,8 +68,8 @@ static void __exit mod_exit(void) wg_allowedips_slab_uninit(); } -module_init(mod_init); -module_exit(mod_exit); +module_init(wg_mod_init); +module_exit(wg_mod_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("WireGuard secure network tunnel"); MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 48e7b982a307..1de413b19e34 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, return 0; } -void wg_packet_queue_free(struct crypt_queue *queue) +void wg_packet_queue_free(struct crypt_queue *queue, bool purge) { free_percpu(queue->worker); - WARN_ON(!__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, NULL); + WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); + ptr_ring_cleanup(&queue->ring, purge ? 
(void(*)(void*))kfree_skb : NULL); } #define NEXT(skb) ((skb)->prev) diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index 4ef2944a68bc..583adb37ee1e 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -23,7 +23,7 @@ struct sk_buff; /* queueing.c APIs: */ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, unsigned int len); -void wg_packet_queue_free(struct crypt_queue *queue); +void wg_packet_queue_free(struct crypt_queue *queue, bool purge); struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); @@ -79,9 +79,7 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) u8 sw_hash = skb->sw_hash; u32 hash = skb->hash; skb_scrub_packet(skb, true); - memset(&skb->headers_start, 0, - offsetof(struct sk_buff, headers_end) - - offsetof(struct sk_buff, headers_start)); + memset(&skb->headers, 0, sizeof(skb->headers)); if (encapsulating) { skb->l4_hash = l4_hash; skb->sw_hash = sw_hash; diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c index 3fedd1d21f5e..dd55e5c26f46 100644 --- a/drivers/net/wireguard/ratelimiter.c +++ b/drivers/net/wireguard/ratelimiter.c @@ -176,12 +176,12 @@ int wg_ratelimiter_init(void) (1U << 14) / sizeof(struct hlist_head))); max_entries = table_size * 8; - table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL); + table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL); if (unlikely(!table_v4)) goto err_kmemcache; #if IS_ENABLED(CONFIG_IPV6) - table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL); + table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL); if (unlikely(!table_v6)) { kvfree(table_v4); goto err_kmemcache; diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 7dc84bcca261..7b8df406c773 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg, return; } - under_load = skb_queue_len(&wg->incoming_handshakes) >= - MAX_QUEUED_INCOMING_HANDSHAKES / 8; + under_load = atomic_read(&wg->handshake_queue_len) >= + MAX_QUEUED_INCOMING_HANDSHAKES / 8; if (under_load) { last_under_load = ktime_get_coarse_boottime_ns(); } else if (last_under_load) { @@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg, void wg_packet_handshake_receive_worker(struct work_struct *work) { - struct wg_device *wg = container_of(work, struct multicore_worker, - work)->ptr; + struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; + struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue); struct sk_buff *skb; - while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) { + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { wg_receive_handshake_packet(wg, skb); dev_kfree_skb(skb); + atomic_dec(&wg->handshake_queue_len); cond_resched(); } } @@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { - int cpu; - - if (skb_queue_len(&wg->incoming_handshakes) > - MAX_QUEUED_INCOMING_HANDSHAKES || - unlikely(!rng_is_initialized())) { + int cpu, ret = -EBUSY; + + if (unlikely(!rng_is_initialized())) + goto drop; + if (atomic_read(&wg->handshake_queue_len) > 
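	/*
	 * Editor's note on the branch being built right here: once the
	 * handshake ring is more than half full the device is treated as
	 * under load, so the producer side only *try*locks the ring's
	 * producer_lock. If another CPU is already flooding handshakes in,
	 * the trylock fails and the packet is dropped via the -EBUSY
	 * default instead of the caller waiting on the lock. Below that
	 * threshold the plain blocking ptr_ring_produce_bh() is used. This
	 * is cheap backpressure against handshake floods, at the cost of
	 * shedding excess initiations early.
	 */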
MAX_QUEUED_INCOMING_HANDSHAKES / 2) { + if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) { + ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); + spin_unlock_bh(&wg->handshake_queue.ring.producer_lock); + } + } else + ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); + if (ret) { + drop: net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", wg->dev->name, skb); goto err; } - skb_queue_tail(&wg->incoming_handshakes, skb); - /* Queues up a call to packet_process_queued_handshake_ - * packets(skb): - */ - cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu); + atomic_inc(&wg->handshake_queue_len); + cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu); + /* Queues up a call to packet_process_queued_handshake_packets(skb): */ queue_work_on(cpu, wg->handshake_receive_wq, - &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work); + &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work); break; } case cpu_to_le32(MESSAGE_DATA): diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 8c496b747108..6f07b949cb81 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) { write_lock_bh(&peer->endpoint_lock); memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); - dst_cache_reset(&peer->endpoint_cache); + dst_cache_reset_now(&peer->endpoint_cache); write_unlock_bh(&peer->endpoint_lock); } diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 0e9bad33fac8..141c1b5a7b1f 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -153,6 +153,10 @@ static void ar5523_cmd_rx_cb(struct urb *urb) ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START"); return; } + if (!cmd->odata) { + ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply"); + return; + } memcpy(cmd->odata, hdr + 1, sizeof(u32)); cmd->olen = sizeof(u32); cmd->res = 0; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 5935e0973d14..8f5b8eb368fa 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -12,6 +12,7 @@ #include <linux/dmi.h> #include <linux/ctype.h> #include <linux/pm_qos.h> +#include <linux/nvmem-consumer.h> #include <asm/byteorder.h> #include "core.h" @@ -89,6 +90,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = true, .dynamic_sar_support = false, }, @@ -124,6 +126,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = true, .dynamic_sar_support = false, }, @@ -160,6 +163,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -190,6 +194,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .uart_pin_workaround = true, .tx_stats_over_pktlog = false, + .credit_size_workaround = false, .bmi_large_size_download = true, .supports_peer_stats_info = true, .dynamic_sar_support = true, @@ -226,6 +231,7 @@ 
static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -261,6 +267,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -296,6 +303,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -334,6 +342,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .supports_peer_stats_info = true, .dynamic_sar_support = true, @@ -376,6 +385,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -424,6 +434,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -469,6 +480,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -504,6 +516,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -541,6 +554,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -570,6 +584,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .uart_pin_workaround = true, + .credit_size_workaround = true, .dynamic_sar_support = false, }, { @@ -611,6 +626,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, }, @@ -639,6 +655,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = true, .hw_filter_reset_required = false, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = true, }, @@ -714,6 +731,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar) static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) { + bool mtu_workaround = ar->hw_params.credit_size_workaround; int ret; u32 param = 0; @@ -731,7 +749,7 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) param |= 
HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET; - if (mode == ATH10K_FIRMWARE_MODE_NORMAL) + if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround) param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; else param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; @@ -935,7 +953,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) } if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || - ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || + ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID; else bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID; @@ -1726,7 +1745,8 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) /* As of now pre-cal is valid for 10_4 variants */ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || - ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE || + ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM) bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL; ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); @@ -1853,6 +1873,39 @@ out_free: return ret; } +static int ath10k_download_cal_nvmem(struct ath10k *ar, const char *cell_name) +{ + struct nvmem_cell *cell; + void *buf; + size_t len; + int ret; + + cell = devm_nvmem_cell_get(ar->dev, cell_name); + if (IS_ERR(cell)) { + ret = PTR_ERR(cell); + return ret; + } + + buf = nvmem_cell_read(cell, &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + if (ar->hw_params.cal_data_len != len) { + kfree(buf); + ath10k_warn(ar, "invalid calibration data length in nvmem-cell '%s': %zu != %u\n", + cell_name, len, ar->hw_params.cal_data_len); + return -EMSGSIZE; + } + + ret = ath10k_download_board_data(ar, buf, len); + kfree(buf); + if (ret) + ath10k_warn(ar, "failed to download calibration data from nvmem-cell '%s': %d\n", + cell_name, ret); + + return ret; +} + int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, struct ath10k_fw_file *fw_file) { @@ -2087,6 +2140,18 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar) { int ret; + ret = ath10k_download_cal_nvmem(ar, "pre-calibration"); + if (ret == 0) { + ar->cal_mode = ATH10K_PRE_CAL_MODE_NVMEM; + goto success; + } else if (ret == -EPROBE_DEFER) { + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a pre-calibration nvmem-cell, try file next: %d\n", + ret); + ret = ath10k_download_cal_file(ar, ar->pre_cal_file); if (ret == 0) { ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE; @@ -2153,6 +2218,18 @@ static int ath10k_download_cal_data(struct ath10k *ar) "pre cal download procedure failed, try cal file: %d\n", ret); + ret = ath10k_download_cal_nvmem(ar, "calibration"); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_NVMEM; + goto done; + } else if (ret == -EPROBE_DEFER) { + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a calibration nvmem-cell, try file next: %d\n", + ret); + ret = ath10k_download_cal_file(ar, ar->cal_file); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_FILE; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 5aeff2d9f6cf..9f6680b3be0a 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -877,8 +877,10 @@ enum ath10k_cal_mode { ATH10K_CAL_MODE_FILE, ATH10K_CAL_MODE_OTP, ATH10K_CAL_MODE_DT, + ATH10K_CAL_MODE_NVMEM, ATH10K_PRE_CAL_MODE_FILE, ATH10K_PRE_CAL_MODE_DT, + ATH10K_PRE_CAL_MODE_NVMEM, ATH10K_CAL_MODE_EEPROM, }; @@ -898,10 +900,14 @@ static inline const char *ath10k_cal_mode_str(enum 
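[Editor's note] The new nvmem path above slots in ahead of the existing file and DT calibration sources, and its error handling is deliberately asymmetric: -EPROBE_DEFER must be propagated so the probe is retried once the nvmem provider appears, while any other error simply falls through to the next source. A condensed sketch of that ordering (the helper name is hypothetical; both download functions appear in the hunks above, and the real code also records ar->cal_mode per source):

static int ath10k_fetch_pre_cal(struct ath10k *ar)
{
	int ret;

	ret = ath10k_download_cal_nvmem(ar, "pre-calibration");
	if (ret == 0 || ret == -EPROBE_DEFER)
		return ret;	/* found, or provider not yet probed */

	/* Cell absent or unreadable: fall back to a firmware file. */
	return ath10k_download_cal_file(ar, ar->pre_cal_file);
}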
ath10k_cal_mode mode) return "otp"; case ATH10K_CAL_MODE_DT: return "dt"; + case ATH10K_CAL_MODE_NVMEM: + return "nvmem"; case ATH10K_PRE_CAL_MODE_FILE: return "pre-cal-file"; case ATH10K_PRE_CAL_MODE_DT: return "pre-cal-dt"; + case ATH10K_PRE_CAL_MODE_NVMEM: + return "pre-cal-nvmem"; case ATH10K_CAL_MODE_EEPROM: return "eeprom"; } diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index 55e7e11d06d9..fe6b6f97a916 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -1522,7 +1522,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) mutex_lock(&ar->dump_mutex); dump_data = (struct ath10k_dump_file_data *)(buf); - strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP", + strscpy(dump_data->df_magic, "ATH10K-FW-DUMP", sizeof(dump_data->df_magic)); dump_data->len = cpu_to_le32(len); @@ -1543,11 +1543,11 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); - strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, + strscpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, sizeof(dump_data->fw_ver)); dump_data->kernel_ver_code = 0; - strlcpy(dump_data->kernel_ver, init_utsname()->release, + strscpy(dump_data->kernel_ver, init_utsname()->release, sizeof(dump_data->kernel_ver)); dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index a6de08d3bf4a..9a3a8907389b 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1401,115 +1401,6 @@ enum htt_dbg_stats_status { }; /* - * target -> host statistics upload - * - * The following field definitions describe the format of the HTT target - * to host stats upload confirmation message. - * The message contains a cookie echoed from the HTT host->target stats - * upload request, which identifies which request the confirmation is - * for, and a series of tag-length-value stats information elements. - * The tag-length header for each stats info element also includes a - * status field, to indicate whether the request for the stat type in - * question was fully met, partially met, unable to be met, or invalid - * (if the stat type in question is disabled in the target). - * A special value of all 1's in this status field is used to indicate - * the end of the series of stats info elements. 
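[Editor's note] The strlcpy() to strscpy() conversions in coredump.c above are behavior-preserving for well-formed input; what changes is the contract. strscpy() returns the number of bytes copied, or -E2BIG on truncation, and never reads the source past what fits, whereas strlcpy() returns strlen(src) and therefore always walks the entire source string. That makes truncation checks direct (sketch; helper name and destination size are illustrative):

#include <linux/string.h>

static void copy_fw_ver(char *dst, size_t dst_len, const char *src)
{
	if (strscpy(dst, src, dst_len) < 0)	/* -E2BIG on truncation */
		pr_warn("firmware version string truncated\n");
}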
- * - * - * |31 16|15 8|7 5|4 0| - * |------------------------------------------------------------| - * | reserved | msg type | - * |------------------------------------------------------------| - * | cookie LSBs | - * |------------------------------------------------------------| - * | cookie MSBs | - * |------------------------------------------------------------| - * | stats entry length | reserved | S |stat type| - * |------------------------------------------------------------| - * | | - * | type-specific stats info | - * | | - * |------------------------------------------------------------| - * | stats entry length | reserved | S |stat type| - * |------------------------------------------------------------| - * | | - * | type-specific stats info | - * | | - * |------------------------------------------------------------| - * | n/a | reserved | 111 | n/a | - * |------------------------------------------------------------| - * Header fields: - * - MSG_TYPE - * Bits 7:0 - * Purpose: identifies this is a statistics upload confirmation message - * Value: 0x9 - * - COOKIE_LSBS - * Bits 31:0 - * Purpose: Provide a mechanism to match a target->host stats confirmation - * message with its preceding host->target stats request message. - * Value: LSBs of the opaque cookie specified by the host-side requestor - * - COOKIE_MSBS - * Bits 31:0 - * Purpose: Provide a mechanism to match a target->host stats confirmation - * message with its preceding host->target stats request message. - * Value: MSBs of the opaque cookie specified by the host-side requestor - * - * Stats Information Element tag-length header fields: - * - STAT_TYPE - * Bits 4:0 - * Purpose: identifies the type of statistics info held in the - * following information element - * Value: htt_dbg_stats_type - * - STATUS - * Bits 7:5 - * Purpose: indicate whether the requested stats are present - * Value: htt_dbg_stats_status, including a special value (0x7) to mark - * the completion of the stats entry series - * - LENGTH - * Bits 31:16 - * Purpose: indicate the stats information size - * Value: This field specifies the number of bytes of stats information - * that follows the element tag-length header. - * It is expected but not required that this length is a multiple of - * 4 bytes. Even if the length is not an integer multiple of 4, the - * subsequent stats entry header will begin on a 4-byte aligned - * boundary. - */ - -#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F -#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0 -#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0 -#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5 - -struct htt_stats_conf_item { - union { - u8 info; - struct { - u8 stat_type:5; /* %HTT_DBG_STATS_ */ - u8 status:3; /* %HTT_DBG_STATS_STATUS_ */ - } __packed; - } __packed; - u8 pad; - __le16 length; - u8 payload[]; /* roundup(length, 4) long */ -} __packed; - -struct htt_stats_conf { - u8 pad[3]; - __le32 cookie_lsb; - __le32 cookie_msb; - - /* each item has variable length! 
*/ - struct htt_stats_conf_item items[]; -} __packed; - -static inline struct htt_stats_conf_item *htt_stats_conf_next_item( - const struct htt_stats_conf_item *item) -{ - return (void *)item + sizeof(*item) + roundup(item->length, 4); -} - -/* * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank * * The following field definitions describe the format of the HTT host @@ -1828,7 +1719,6 @@ struct htt_resp { struct htt_rc_update rc_update; struct htt_rx_test rx_test; struct htt_pktlog_msg pktlog_msg; - struct htt_stats_conf stats_conf; struct htt_rx_pn_ind rx_pn_ind; struct htt_rx_offload_ind rx_offload_ind; struct htt_rx_in_ord_ind rx_in_ord_ind; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index d6b8bdcef416..b793eac2cfac 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -147,6 +147,9 @@ void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) htt->num_pending_tx--; if (htt->num_pending_tx == htt->max_num_pending_tx - 1) ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); + + if (htt->num_pending_tx == 0) + wake_up(&htt->empty_tx_wq); } int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 6b03c7787e36..591ef7416b61 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -618,6 +618,9 @@ struct ath10k_hw_params { */ bool uart_pin_workaround; + /* Workaround for the credit size calculation */ + bool credit_size_workaround; + /* tx stats support over pktlog */ bool tx_stats_over_pktlog; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 1f73fbfee0c0..b11aaee8b8c0 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6380,13 +6380,14 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, scan_timeout = min_t(u32, arg.max_rest_time * (arg.n_channels - 1) + (req->duration + ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * - arg.n_channels, arg.max_scan_time + 200); - + arg.n_channels, arg.max_scan_time); } else { - /* Add a 200ms margin to account for event/command processing */ - scan_timeout = arg.max_scan_time + 200; + scan_timeout = arg.max_scan_time; } + /* Add a 200ms margin to account for event/command processing */ + scan_timeout += 200; + ret = ath10k_start_scan(ar, &arg); if (ret) { ath10k_warn(ar, "failed to start hw scan: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 7c9ea0c073d8..6f8b64218894 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -82,8 +82,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, flags = skb_cb->flags; ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); ath10k_htt_tx_dec_pending(htt); - if (htt->num_pending_tx == 0) - wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); rcu_read_lock(); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 7c1c2658cb5f..62c453a21e49 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2611,9 +2611,36 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_mac_handle_beacon(ar, skb); if (ieee80211_is_beacon(hdr->frame_control) || - ieee80211_is_probe_resp(hdr->frame_control)) + ieee80211_is_probe_resp(hdr->frame_control)) { + struct ieee80211_mgmt *mgmt = (void *)skb->data; 
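[Editor's note] The htt_tx.c/txrx.c pair of hunks above moves the empty_tx_wq wakeup from the one call site in the tx-completion path into ath10k_htt_tx_dec_pending() itself, so the zero test now sits next to the decrement it observes, under the same htt->tx_lock its callers already hold, and every decrement point can wake waiters. The waiter side pairs with it roughly like this (sketch; helper name and timeout are illustrative):

#include <linux/jiffies.h>
#include <linux/wait.h>

static void ath10k_wait_tx_drained(struct ath10k_htt *htt)
{
	/* wait_event_timeout() re-tests the condition after every
	 * wakeup, so a wakeup racing with a new increment is harmless.
	 */
	wait_event_timeout(htt->empty_tx_wq,
			   READ_ONCE(htt->num_pending_tx) == 0,
			   msecs_to_jiffies(5000));
}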
+ enum cfg80211_bss_frame_type ftype; + u8 *ies; + int ies_ch; + status->boottime_ns = ktime_get_boottime_ns(); + if (!ar->scan_channel) + goto drop; + + ies = mgmt->u.beacon.variable; + + if (ieee80211_is_beacon(mgmt->frame_control)) + ftype = CFG80211_BSS_FTYPE_BEACON; + else + ftype = CFG80211_BSS_FTYPE_PRESP; + + ies_ch = cfg80211_get_ies_channel_number(mgmt->u.beacon.variable, + skb_tail_pointer(skb) - ies, + sband->band, ftype); + + if (ies_ch > 0 && ies_ch != channel) { + ath10k_dbg(ar, ATH10K_DBG_MGMT, + "channel mismatched ds channel %d scan channel %d\n", + ies_ch, channel); + goto drop; + } + } + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", skb, skb->len, @@ -2627,6 +2654,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) ieee80211_rx_ni(ar->hw, skb); return 0; + +drop: + dev_kfree_skb(skb); + return 0; } static int freq_to_idx(struct ath10k *ar, int freq) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 01bfd09a9d88..4abd12e78028 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3478,7 +3478,9 @@ struct wmi_phyerr_event { __le32 num_phyerrs; __le32 tsf_l32; __le32 tsf_u32; - struct wmi_phyerr phyerrs[]; + + /* array of struct wmi_phyerr */ + u8 phyerrs[]; } __packed; struct wmi_10_4_phyerr_event { diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 8c9c781afc3e..3fb0aa000825 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -175,8 +175,11 @@ static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) ath11k_ahb_ext_grp_disable(irq_grp); - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } } @@ -206,13 +209,13 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { - const struct ce_pipe_config *ce_config; + const struct ce_attr *ce_attr; - ce_config = &ab->hw_params.target_ce_config[ce_id]; - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT) + ce_attr = &ab->hw_params.host_ce_config[ce_id]; + if (ce_attr->src_nentries) ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS); - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) { + if (ce_attr->dest_nentries) { ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, CE_HOST_IE_3_ADDRESS); @@ -221,13 +224,13 @@ static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { - const struct ce_pipe_config *ce_config; + const struct ce_attr *ce_attr; - ce_config = &ab->hw_params.target_ce_config[ce_id]; - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT) + ce_attr = &ab->hw_params.host_ce_config[ce_id]; + if (ce_attr->src_nentries) ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS); - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) { + if (ce_attr->dest_nentries) { ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, CE_HOST_IE_3_ADDRESS); @@ -300,7 +303,10 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - 
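[Editor's note] The filter added to ath10k_wmi_event_mgmt_rx() above compares the channel advertised inside the frame's IEs against the channel the scan is actually parked on, and drops mismatches so cfg80211 does not place a BSS on the wrong channel from cross-channel leakage. cfg80211_get_ies_channel_number() handles the band-specific cases; for 2.4 GHz the advertised channel is simply the DS Parameter Set IE (stripped-down sketch):

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static int beacon_ds_channel(const u8 *ies, int ies_len)
{
	const u8 *ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ies_len);

	if (!ie || ie[1] != 1)
		return -ENOENT;		/* IE absent or malformed */
	return ie[2];			/* channel number 1..14 */
}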
napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } ath11k_ahb_ext_grp_enable(irq_grp); } } diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index de8b632b058c..aaa7b05ff49d 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -14,6 +14,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE1: target->host HTT + HTC control */ @@ -40,6 +41,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ @@ -73,11 +75,12 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ { - .flags = CE_ATTR_FLAGS, + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, @@ -89,6 +92,7 @@ const struct ce_attr ath11k_host_ce_config_ipq8074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE10: target->host HTT */ @@ -142,6 +146,7 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ @@ -175,6 +180,7 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ @@ -220,6 +226,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = { .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, + .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ @@ -489,18 +496,32 @@ err_unlock: return skb; } -static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe) +static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct sk_buff *skb; + struct sk_buff_head list; + __skb_queue_head_init(&list); while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) { if (!skb) continue; dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); - dev_kfree_skb_any(skb); + + if ((!pipe->send_cb) || ab->hw_params.credit_flow) { + dev_kfree_skb_any(skb); + continue; + } + + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath11k_dbg(ab, ATH11K_DBG_AHB, "tx ce pipe %d len %d\n", + pipe->pipe_num, skb->len); + pipe->send_cb(ab, skb); } } @@ -636,7 +657,7 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id) pipe->attr_flags = attr->flags; if (attr->src_nentries) { - pipe->send_cb = ath11k_ce_send_done_cb; + pipe->send_cb = attr->send_cb; nentries = roundup_pow_of_two(attr->src_nentries); desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); @@ -667,9 +688,10 @@ static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id) void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; + const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id]; - if (pipe->send_cb) - pipe->send_cb(pipe); + if 
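[Editor's note] The napi_enabled flag introduced in the ahb.c hunks above makes the enable/disable pair idempotent: napi_disable() on a NAPI instance that was never enabled blocks forever waiting for the scheduled bit to clear, and a double napi_enable() trips a BUG_ON, so repeated up/down transitions need the guard. Factored out, the pattern is just (helper names hypothetical; the fields are those added to struct ath11k_ext_irq_grp in this series):

#include <linux/netdevice.h>

static void ext_grp_napi_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	if (irq_grp->napi_enabled)
		return;
	napi_enable(&irq_grp->napi);
	irq_grp->napi_enabled = true;
}

static void ext_grp_napi_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	if (!irq_grp->napi_enabled)
		return;
	napi_synchronize(&irq_grp->napi);
	napi_disable(&irq_grp->napi);
	irq_grp->napi_enabled = false;
}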
(attr->src_nentries) + ath11k_ce_tx_process_cb(pipe); if (pipe->recv_cb) ath11k_ce_recv_process_cb(pipe); @@ -678,9 +700,10 @@ void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id) void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; + const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id]; - if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb) - pipe->send_cb(pipe); + if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries) + ath11k_ce_tx_process_cb(pipe); } EXPORT_SYMBOL(ath11k_ce_per_engine_service); @@ -953,6 +976,7 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab) void ath11k_ce_free_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; + struct ath11k_ce_ring *ce_ring; int desc_sz; int i; @@ -964,22 +988,24 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab) if (pipe->src_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); + ce_ring = pipe->src_ring; dma_free_coherent(ab->dev, pipe->src_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, - pipe->src_ring->base_addr_owner_space, - pipe->src_ring->base_addr_ce_space); + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_ce_space_unaligned); kfree(pipe->src_ring); pipe->src_ring = NULL; } if (pipe->dest_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST); + ce_ring = pipe->dest_ring; dma_free_coherent(ab->dev, pipe->dest_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, - pipe->dest_ring->base_addr_owner_space, - pipe->dest_ring->base_addr_ce_space); + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_ce_space_unaligned); kfree(pipe->dest_ring); pipe->dest_ring = NULL; } @@ -987,11 +1013,12 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab) if (pipe->status_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS); + ce_ring = pipe->status_ring; dma_free_coherent(ab->dev, pipe->status_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, - pipe->status_ring->base_addr_owner_space, - pipe->status_ring->base_addr_ce_space); + ce_ring->base_addr_owner_space_unaligned, + ce_ring->base_addr_ce_space_unaligned); kfree(pipe->status_ring); pipe->status_ring = NULL; } diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 713f766cac22..8255b6cfab0c 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -101,6 +101,7 @@ struct ce_attr { unsigned int dest_nentries; void (*recv_cb)(struct ath11k_base *, struct sk_buff *); + void (*send_cb)(struct ath11k_base *, struct sk_buff *); }; #define CE_DESC_RING_ALIGN 8 @@ -154,7 +155,7 @@ struct ath11k_ce_pipe { unsigned int buf_sz; unsigned int rx_buf_needed; - void (*send_cb)(struct ath11k_ce_pipe *); + void (*send_cb)(struct ath11k_base *, struct sk_buff *); void (*recv_cb)(struct ath11k_base *, struct sk_buff *); struct tasklet_struct intr_tq; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index b5a2af3ffc3e..293563b3f784 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -52,6 +52,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074, .svc_to_ce_map_len = 21, + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, .single_pdev_only = false, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, @@ -74,14 +77,26 @@ static const struct ath11k_hw_params 
ath11k_hw_params[] = { BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, + .full_monitor_mode = false, .supports_shadow_regs = false, .idle_ps = false, + .supports_sta_ps = false, .cold_boot_calib = true, + .fw_mem_mode = 0, + .num_vdevs = 16 + 1, + .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), + .supports_regdb = false, .fix_l1ss = true, + .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = true, + .wakeup_mhi = false, + .supports_rssi_stats = false, + .fw_wmi_diag_event = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -104,6 +119,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018, .svc_to_ce_map_len = 19, + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, .single_pdev_only = false, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, @@ -123,14 +141,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, + .full_monitor_mode = false, .supports_shadow_regs = false, .idle_ps = false, + .supports_sta_ps = false, .cold_boot_calib = true, + .fw_mem_mode = 0, + .num_vdevs = 16 + 1, + .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), + .supports_regdb = false, .fix_l1ss = true, + .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = true, + .wakeup_mhi = false, + .supports_rssi_stats = false, + .fw_wmi_diag_event = false, }, { .name = "qca6390 hw2.0", @@ -153,6 +183,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, + .rfkill_pin = 48, + .rfkill_cfg = 0, + .rfkill_on_level = 1, .single_pdev_only = true, .rxdma1_enable = false, .num_rxmda_per_pdev = 2, @@ -171,14 +204,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), .supports_monitor = false, + .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, + .supports_sta_ps = true, .cold_boot_calib = false, + .fw_mem_mode = 0, + .num_vdevs = 16 + 1, + .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), + .supports_regdb = true, .fix_l1ss = true, + .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = false, + .wakeup_mhi = true, + .supports_rssi_stats = true, + .fw_wmi_diag_event = true, }, { .name = "qcn9074 hw1.0", @@ -201,6 +246,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074, .svc_to_ce_map_len = 18, + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, .rx_mac_buf_ring = false, @@ -219,14 +267,26 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, + .full_monitor_mode = true, .supports_shadow_regs = false, .idle_ps = false, + .supports_sta_ps = false, 
.cold_boot_calib = false, + .fw_mem_mode = 2, + .num_vdevs = 8, + .num_peers = 128, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), + .supports_regdb = false, .fix_l1ss = true, + .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, + .supports_dynamic_smps_6ghz = true, + .alloc_cacheable_memory = true, + .wakeup_mhi = false, + .supports_rssi_stats = false, + .fw_wmi_diag_event = false, }, { .name = "wcn6855 hw2.0", @@ -249,6 +309,9 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, .single_pdev_only = true, .rxdma1_enable = false, .num_rxmda_per_pdev = 2, @@ -267,14 +330,88 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), .supports_monitor = false, + .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, + .supports_sta_ps = true, .cold_boot_calib = false, + .fw_mem_mode = 0, + .num_vdevs = 16 + 1, + .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), + .supports_regdb = true, .fix_l1ss = false, + .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = false, + .wakeup_mhi = true, + .supports_rssi_stats = true, + .fw_wmi_diag_event = true, + }, + { + .name = "wcn6855 hw2.1", + .hw_rev = ATH11K_HW_WCN6855_HW21, + .fw = { + .dir = "WCN6855/hw2.1", + .board_size = 256 * 1024, + .cal_offset = 128 * 1024, + }, + .max_radios = 3, + .bdf_addr = 0x4B0C0000, + .hw_ops = &wcn6855_ops, + .ring_mask = &ath11k_hw_ring_mask_qca6390, + .internal_sleep_clock = true, + .regs = &wcn6855_regs, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, + .host_ce_config = ath11k_host_ce_config_qca6390, + .ce_count = 9, + .target_ce_config = ath11k_target_ce_config_wlan_qca6390, + .target_ce_count = 9, + .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, + .svc_to_ce_map_len = 14, + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + .single_pdev_only = true, + .rxdma1_enable = false, + .num_rxmda_per_pdev = 2, + .rx_mac_buf_ring = true, + .vdev_start_delay = true, + .htt_peer_map_v2 = false, + + .spectral = { + .fft_sz = 0, + .fft_pad_sz = 0, + .summary_pad_sz = 0, + .fft_hdr_len = 0, + .max_fft_bins = 0, + }, + + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .supports_monitor = false, + .supports_shadow_regs = true, + .idle_ps = true, + .supports_sta_ps = true, + .cold_boot_calib = false, + .fw_mem_mode = 0, + .num_vdevs = 16 + 1, + .num_peers = 512, + .supports_suspend = true, + .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), + .supports_regdb = true, + .fix_l1ss = false, + .credit_flow = true, + .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, + .hal_params = &ath11k_hw_hal_params_qca6390, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = false, + .wakeup_mhi = true, + .supports_rssi_stats = true, + .fw_wmi_diag_event = true, }, }; @@ -392,11 +529,26 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, scnprintf(variant, sizeof(variant), ",variant=%s", ab->qmi.target.bdf_ext); - scnprintf(name, name_len, - "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", - ath11k_bus_str(ab->hif.bus), 
- ab->qmi.target.chip_id, - ab->qmi.target.board_id, variant); + switch (ab->id.bdf_search) { + case ATH11K_BDF_SEARCH_BUS_AND_BOARD: + scnprintf(name, name_len, + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s", + ath11k_bus_str(ab->hif.bus), + ab->id.vendor, ab->id.device, + ab->id.subsystem_vendor, + ab->id.subsystem_device, + ab->qmi.target.chip_id, + ab->qmi.target.board_id, + variant); + break; + default: + scnprintf(name, name_len, + "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", + ath11k_bus_str(ab->hif.bus), + ab->qmi.target.chip_id, + ab->qmi.target.board_id, variant); + break; + } ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -620,10 +772,12 @@ err: return ret; } -static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, - struct ath11k_board_data *bd) +int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, + struct ath11k_board_data *bd, + const char *name) { - bd->fw = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_BOARD_FILE); + bd->fw = ath11k_core_firmware_request(ab, name); + if (IS_ERR(bd->fw)) return PTR_ERR(bd->fw); @@ -633,7 +787,7 @@ static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, return 0; } -#define BOARD_NAME_SIZE 100 +#define BOARD_NAME_SIZE 200 int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { char boardname[BOARD_NAME_SIZE]; @@ -651,7 +805,7 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) goto success; ab->bd_api = 1; - ret = ath11k_core_fetch_board_data_api_1(ab, bd); + ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE); if (ret) { ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n", ab->hw_params.fw.dir); @@ -663,6 +817,18 @@ success: return 0; } +int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd) +{ + int ret; + + ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME); + if (ret) + ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n", + ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir); + + return ret; +} + static void ath11k_core_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) @@ -893,6 +1059,27 @@ err_firmware_stop: return ret; } +static int ath11k_core_rfkill_config(struct ath11k_base *ab) +{ + struct ath11k *ar; + int ret = 0, i; + + if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL)) + return 0; + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + + ret = ath11k_mac_rfkill_config(ar); + if (ret && ret != -EOPNOTSUPP) { + ath11k_warn(ab, "failed to configure rfkill: %d", ret); + return ret; + } + } + + return ret; +} + int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) { int ret; @@ -939,6 +1126,13 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) goto err_core_stop; } ath11k_hif_irq_enable(ab); + + ret = ath11k_core_rfkill_config(ab); + if (ret && ret != -EOPNOTSUPP) { + ath11k_err(ab, "failed to config rfkill: %d\n", ret); + goto err_core_stop; + } + mutex_unlock(&ab->core_lock); return 0; @@ -969,7 +1163,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) ath11k_dp_free(ab); ath11k_hal_srng_deinit(ab); - ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; + ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; ret = ath11k_hal_srng_init(ab); if (ret) @@ -1003,6 +1197,8 @@ void ath11k_core_halt(struct ath11k *ar) 
ath11k_mac_peer_cleanup_all(ar); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->regd_update_work); + cancel_work_sync(&ab->update_11d_work); + cancel_work_sync(&ab->rfkill_work); rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); @@ -1010,6 +1206,56 @@ void ath11k_core_halt(struct ath11k *ar) idr_init(&ar->txmgmt_idr); } +static void ath11k_rfkill_work(struct work_struct *work) +{ + struct ath11k_base *ab = container_of(work, struct ath11k_base, rfkill_work); + struct ath11k *ar; + bool rfkill_radio_on; + int i; + + spin_lock_bh(&ab->base_lock); + rfkill_radio_on = ab->rfkill_radio_on; + spin_unlock_bh(&ab->base_lock); + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + if (!ar) + continue; + + /* notify cfg80211 radio state change */ + ath11k_mac_rfkill_enable_radio(ar, rfkill_radio_on); + wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on); + } +} + +static void ath11k_update_11d(struct work_struct *work) +{ + struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work); + struct ath11k *ar; + struct ath11k_pdev *pdev; + struct wmi_set_current_country_params set_current_param = {}; + int ret, i; + + spin_lock_bh(&ab->base_lock); + memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2); + spin_unlock_bh(&ab->base_lock); + + ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n", + set_current_param.alpha2[0], + set_current_param.alpha2[1]); + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + + ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + if (ret) + ath11k_warn(ar->ab, + "pdev id %d failed set current country code: %d\n", + i, ret); + } +} + static void ath11k_core_restart(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work); @@ -1043,6 +1289,7 @@ static void ath11k_core_restart(struct work_struct *work) idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar); idr_destroy(&ar->txmgmt_idr); + wake_up(&ar->txmgmt_empty_waitq); } wake_up(&ab->wmi_ab.tx_credits_wq); @@ -1179,12 +1426,15 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, mutex_init(&ab->core_lock); spin_lock_init(&ab->base_lock); + mutex_init(&ab->vdev_id_11d_lock); INIT_LIST_HEAD(&ab->peers); init_waitqueue_head(&ab->peer_mapping_wq); init_waitqueue_head(&ab->wmi_ab.tx_credits_wq); init_waitqueue_head(&ab->qmi.cold_boot_waitq); INIT_WORK(&ab->restart_work, ath11k_core_restart); + INIT_WORK(&ab->update_11d_work, ath11k_update_11d); + INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work); timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->wow.wakeup_completed); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 31d234a51c79..9e88ccca5ca7 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -47,6 +47,11 @@ enum ath11k_supported_bw { ATH11K_BW_160 = 3, }; +enum ath11k_bdf_search { + ATH11K_BDF_SEARCH_DEFAULT, + ATH11K_BDF_SEARCH_BUS_AND_BOARD, +}; + enum wme_ac { WME_AC_BE, WME_AC_BK, @@ -112,6 +117,7 @@ enum ath11k_hw_rev { ATH11K_HW_IPQ6018_HW10, ATH11K_HW_QCN9074_HW10, ATH11K_HW_WCN6855_HW20, + ATH11K_HW_WCN6855_HW21, }; enum ath11k_firmware_mode { @@ -136,6 +142,7 @@ struct ath11k_ext_irq_grp { u32 num_irq; u32 grp_id; u64 timestamp; + bool napi_enabled; struct napi_struct napi; struct net_device napi_ndev; }; @@ -194,6 
+201,9 @@ enum ath11k_dev_flags { ATH11K_FLAG_REGISTERED, ATH11K_FLAG_QMI_FAIL, ATH11K_FLAG_HTC_SUSPEND_COMPLETE, + ATH11K_FLAG_CE_IRQ_ENABLED, + ATH11K_FLAG_EXT_IRQ_ENABLED, + ATH11K_FLAG_FIXED_MEM_RGN, }; enum ath11k_monitor_flags { @@ -240,6 +250,7 @@ struct ath11k_vif { bool is_started; bool is_up; bool spectral_enabled; + bool ps; u32 aid; u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; @@ -249,6 +260,8 @@ struct ath11k_vif { int txpower; bool rsnie_present; bool wpaie_present; + bool bcca_zero_sent; + bool do_not_send_tmpl; struct ieee80211_chanctx_conf chanctx; }; @@ -370,10 +383,13 @@ struct ath11k_sta { struct work_struct update_wk; struct work_struct set_4addr_wk; struct rate_info txrate; + u32 peer_nss; struct rate_info last_txrate; u64 rx_duration; u64 tx_duration; u8 rssi_comb; + s8 rssi_beacon; + s8 chain_signal[IEEE80211_MAX_CHAINS]; struct ath11k_htt_tx_stats *tx_stats; struct ath11k_rx_peer_stats *rx_stats; @@ -404,6 +420,10 @@ enum ath11k_state { /* Antenna noise floor */ #define ATH11K_DEFAULT_NOISE_FLOOR -95 +#define ATH11K_INVALID_RSSI_FULL -1 + +#define ATH11K_INVALID_RSSI_EMPTY -128 + struct ath11k_fw_stats { struct dentry *debugfs_fwstats; u32 pdev_id; @@ -539,6 +559,7 @@ struct ath11k { /* protects txmgmt_idr data */ spinlock_t txmgmt_idr_lock; atomic_t num_pending_mgmt_tx; + wait_queue_head_t txmgmt_empty_waitq; /* cycle count is reported twice for each visited channel during scan. * access protected by data_lock @@ -577,6 +598,11 @@ struct ath11k { #endif bool dfs_block_radar_events; struct ath11k_thermal thermal; + u32 vdev_id_11d_scan; + struct completion finish_11d_scan; + struct completion finish_11d_ch_list; + bool pending_11d; + bool regdom_set_by_user; }; struct ath11k_band_cap { @@ -703,6 +729,11 @@ struct ath11k_base { /* Protects data like peers */ spinlock_t base_lock; struct ath11k_pdev pdevs[MAX_RADIOS]; + struct { + enum WMI_HOST_WLAN_BAND supported_bands; + u32 pdev_id; + } target_pdev_ids[MAX_RADIOS]; + u8 target_pdev_count; struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS]; struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS]; unsigned long long free_vdev_map; @@ -713,7 +744,6 @@ struct ath11k_base { u32 wlan_init_status; int irq_num[ATH11K_IRQ_NUM_MAX]; struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; - struct napi_struct *napi; struct ath11k_targ_cap target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; @@ -746,6 +776,8 @@ struct ath11k_base { struct completion driver_recovery; struct workqueue_struct *workqueue; struct work_struct restart_work; + struct work_struct update_11d_work; + u8 new_alpha2[3]; struct { /* protected by data_lock */ u32 fw_crash_counter; @@ -754,11 +786,25 @@ struct ath11k_base { struct ath11k_dbring_cap *db_caps; u32 num_db_cap; + struct work_struct rfkill_work; + + /* true means radio is on */ + bool rfkill_radio_on; + /* To synchronize 11d scan vdev id */ + struct mutex vdev_id_11d_lock; struct timer_list mon_reap_timer; struct completion htc_suspend; + struct { + enum ath11k_bdf_search bdf_search; + u32 vendor; + u32 device; + u32 subsystem_vendor; + u32 subsystem_device; + } id; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; @@ -934,6 +980,10 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, void ath11k_core_free(struct ath11k_base *ath11k); int ath11k_core_fetch_bdf(struct ath11k_base *ath11k, struct ath11k_board_data *bd); +int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct 
ath11k_board_data *bd); +int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, + struct ath11k_board_data *bd, + const char *name); void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); int ath11k_core_check_dt(struct ath11k_base *ath11k); diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c index fd98ba5b1130..eda67ebfc4c2 100644 --- a/drivers/net/wireless/ath/ath11k/dbring.c +++ b/drivers/net/wireless/ath/ath11k/dbring.c @@ -6,6 +6,35 @@ #include "core.h" #include "debug.h" +#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf + +int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size) +{ + u32 *temp; + int idx; + + size = size >> 2; + + for (idx = 0, temp = buffer; idx < size; idx++, temp++) { + if (*temp == ATH11K_DB_MAGIC_VALUE) + return -EINVAL; + } + + return 0; +} + +static void ath11k_dbring_fill_magic_value(struct ath11k *ar, + void *buffer, u32 size) +{ + u32 *temp; + int idx; + + size = size >> 2; + + for (idx = 0, temp = buffer; idx < size; idx++, temp++) + *temp = ATH11K_DB_MAGIC_VALUE; +} + static int ath11k_dbring_bufs_replenish(struct ath11k *ar, struct ath11k_dbring *ring, struct ath11k_dbring_element *buff) @@ -26,6 +55,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar, ptr_unaligned = buff->payload; ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); + ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz); paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, DMA_FROM_DEVICE); @@ -87,17 +117,23 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar, req_entries = min(num_free, ring->bufs_max); num_remain = req_entries; align = ring->buf_align; - size = sizeof(*buff) + ring->buf_sz + align - 1; + size = ring->buf_sz + align - 1; while (num_remain > 0) { - buff = kzalloc(size, GFP_ATOMIC); + buff = kzalloc(sizeof(*buff), GFP_ATOMIC); if (!buff) break; + buff->payload = kzalloc(size, GFP_ATOMIC); + if (!buff->payload) { + kfree(buff); + break; + } ret = ath11k_dbring_bufs_replenish(ar, ring, buff); if (ret) { ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n", num_remain, req_entries); + kfree(buff->payload); kfree(buff); break; } @@ -282,7 +318,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; num_entry = ev->fixed.num_buf_release_entry; - size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1; + size = ring->buf_sz + ring->buf_align - 1; num_buff_reaped = 0; spin_lock_bh(&srng->lock); @@ -319,7 +355,8 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, ring->handler(ar, &handler_data); } - memset(buff, 0, size); + buff->paddr = 0; + memset(buff->payload, 0, size); ath11k_dbring_bufs_replenish(ar, ring, buff); } @@ -346,6 +383,7 @@ void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring) idr_remove(&ring->bufs_idr, buf_id); dma_unmap_single(ar->ab->dev, buff->paddr, ring->buf_sz, DMA_FROM_DEVICE); + kfree(buff->payload); kfree(buff); } diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h index f7fce9ef9c36..ef906c687b8c 100644 --- a/drivers/net/wireless/ath/ath11k/dbring.h +++ b/drivers/net/wireless/ath/ath11k/dbring.h @@ -13,7 +13,7 @@ struct ath11k_dbring_element { dma_addr_t paddr; - u8 payload[0]; + u8 *payload; }; struct ath11k_dbring_data { @@ -76,4 +76,6 @@ int ath11k_dbring_get_cap(struct ath11k_base *ab, struct ath11k_dbring_cap *db_cap); void
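[Editor's note] The dbring magic-value scheme above stamps every 32-bit word of a DMA-from-device buffer before it is mapped; after the device signals completion, ath11k_dbring_validate_buffer() treats any surviving stamp word as a sign the hardware never fully wrote the buffer and rejects it. The fill loop is equivalent to the kernel's memset32() helper (sketch; the wrapper name is hypothetical):

#include <linux/string.h>

static void dbring_fill_magic(void *buffer, u32 size)
{
	/* size is in bytes; memset32() takes a count of 32-bit words. */
	memset32(buffer, ATH11K_DB_MAGIC_VALUE, size / sizeof(u32));
}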
ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring); void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring); +int ath11k_dbring_validate_buffer(struct ath11k *ar, void *data, u32 size); + #endif /* ATH11K_DBRING_H */ diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c index c86de95fbdc5..958d87429062 100644 --- a/drivers/net/wireless/ath/ath11k/debug.c +++ b/drivers/net/wireless/ath/ath11k/debug.c @@ -17,7 +17,7 @@ void ath11k_info(struct ath11k_base *ab, const char *fmt, ...) va_start(args, fmt); vaf.va = &args; dev_info(ab->dev, "%pV", &vaf); - /* TODO: Trace the log */ + trace_ath11k_log_info(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_info); @@ -32,7 +32,7 @@ void ath11k_err(struct ath11k_base *ab, const char *fmt, ...) va_start(args, fmt); vaf.va = &args; dev_err(ab->dev, "%pV", &vaf); - /* TODO: Trace the log */ + trace_ath11k_log_err(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_err); @@ -47,7 +47,7 @@ void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...) va_start(args, fmt); vaf.va = &args; dev_warn_ratelimited(ab->dev, "%pV", &vaf); - /* TODO: Trace the log */ + trace_ath11k_log_warn(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_warn); @@ -68,7 +68,7 @@ void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask, if (ath11k_debug_mask & mask) dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf); - /* TODO: trace log */ + trace_ath11k_log_dbg(ab, mask, &vaf); va_end(args); } @@ -100,6 +100,10 @@ void ath11k_dbg_dump(struct ath11k_base *ab, dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf); } } + + /* tracing code doesn't like null strings */ + trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "", + buf, len); } EXPORT_SYMBOL(ath11k_dbg_dump); diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h index 659a275e2eb3..fbbd5fe02aa8 100644 --- a/drivers/net/wireless/ath/ath11k/debug.h +++ b/drivers/net/wireless/ath/ath11k/debug.h @@ -60,7 +60,8 @@ static inline void ath11k_dbg_dump(struct ath11k_base *ab, #define ath11k_dbg(ar, dbg_mask, fmt, ...) \ do { \ - if (ath11k_debug_mask & dbg_mask) \ + if ((ath11k_debug_mask & dbg_mask) || \ + trace_ath11k_log_dbg_enabled()) \ __ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \ } while (0) diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 80afd35337a1..198ade90b725 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -3,6 +3,8 @@ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. 
*/ +#include <linux/vmalloc.h> + #include "debugfs.h" #include "core.h" @@ -126,6 +128,11 @@ void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb goto complete; } + if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { + ar->debug.fw_stats_done = true; + goto complete; + } + if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { if (list_empty(&stats.vdevs)) { ath11k_warn(ab, "empty vdev stats"); @@ -195,7 +202,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar, * received 'update stats' event, we keep a 3 seconds timeout in case, * fw_stats_done is not marked yet */ - timeout = jiffies + msecs_to_jiffies(3 * HZ); + timeout = jiffies + msecs_to_jiffies(3 * 1000); ath11k_debugfs_fw_stats_reset(ar); @@ -229,6 +236,38 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar, return 0; } +int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id, + u32 vdev_id, u32 stats_id) +{ + struct ath11k_base *ab = ar->ab; + struct stats_request_params req_param; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH11K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + req_param.pdev_id = pdev_id; + req_param.vdev_id = vdev_id; + req_param.stats_id = stats_id; + + ret = ath11k_debugfs_fw_stats_request(ar, &req_param); + if (ret) + ath11k_warn(ab, "failed to request fw stats: %d\n", ret); + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n", + pdev_id, vdev_id, stats_id); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + static int ath11k_open_pdev_stats(struct inode *inode, struct file *file) { struct ath11k *ar = inode->i_private; diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h index ec743a015dc7..4c0740394c95 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.h +++ b/drivers/net/wireless/ath/ath11k/debugfs.h @@ -117,6 +117,8 @@ void ath11k_debugfs_unregister(struct ath11k *ar); void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb); void ath11k_debugfs_fw_stats_init(struct ath11k *ar); +int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id, + u32 vdev_id, u32 stats_id); static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar) { @@ -216,6 +218,12 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar) return 0; } +static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar, + u32 pdev_id, u32 vdev_id, u32 stats_id) +{ + return 0; +} + #endif /* CONFIG_MAC80211_DEBUGFS*/ #endif /* _ATH11K_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c index fecd9718f5ce..1b1acbdf837a 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c @@ -126,85 +126,9 @@ void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta, } void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar, - struct sk_buff *msdu, struct hal_tx_status *ts) { - struct ath11k_base *ab = ar->ab; - struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats; - enum hal_tx_rate_stats_pkt_type pkt_type; - enum hal_tx_rate_stats_sgi sgi; - enum hal_tx_rate_stats_bw bw; - struct ath11k_peer *peer; - struct ath11k_sta *arsta; - struct ieee80211_sta *sta; - u16 rate; - u8 rate_idx = 0; - int ret; - u8 mcs; - - rcu_read_lock(); - spin_lock_bh(&ab->base_lock); - peer = ath11k_peer_find_by_id(ab, ts->peer_id); - if (!peer || !peer->sta) { - ath11k_warn(ab, "failed to find the 
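[Editor's note] The debugfs.c timeout fix above is worth spelling out: msecs_to_jiffies() takes milliseconds, so the old 3 * HZ argument only meant three seconds on HZ=1000 kernels. For example:

/* With HZ = 250:
 *   msecs_to_jiffies(3 * HZ)   == msecs_to_jiffies(750)  -> 0.75 s
 *   msecs_to_jiffies(3 * 1000) == msecs_to_jiffies(3000) -> 3 s
 */
unsigned long timeout = jiffies + msecs_to_jiffies(3 * 1000);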
peer\n"); - spin_unlock_bh(&ab->base_lock); - rcu_read_unlock(); - return; - } - - sta = peer->sta; - arsta = (struct ath11k_sta *)sta->drv_priv; - - memset(&arsta->txrate, 0, sizeof(arsta->txrate)); - pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, - ts->rate_stats); - mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, - ts->rate_stats); - sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI, - ts->rate_stats); - bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats); - - if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A || - pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) { - ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, - pkt_type, - &rate_idx, - &rate); - if (ret < 0) - goto err_out; - arsta->txrate.legacy = rate; - } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) { - if (mcs > 7) { - ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs); - goto err_out; - } - - arsta->txrate.mcs = mcs + 8 * (arsta->last_txrate.nss - 1); - arsta->txrate.flags = RATE_INFO_FLAGS_MCS; - if (sgi) - arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; - } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) { - if (mcs > 9) { - ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs); - goto err_out; - } - - arsta->txrate.mcs = mcs; - arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; - if (sgi) - arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; - } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) { - /* TODO */ - } - - arsta->txrate.nss = arsta->last_txrate.nss; - arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); - - ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); - -err_out: - spin_unlock_bh(&ab->base_lock); - rcu_read_unlock(); + ath11k_dp_tx_update_txcompl(ar, ts); } static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file, diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h index 18dc65d9edcf..e6c11b3a40aa 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs_sta.h +++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h @@ -19,7 +19,6 @@ void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta, struct ath11k_per_peer_tx_stats *peer_stats, u8 legacy_rate_idx); void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar, - struct sk_buff *msdu, struct hal_tx_status *ts); #else /* CONFIG_ATH11K_DEBUGFS */ @@ -34,7 +33,6 @@ ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta, } static inline void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar, - struct sk_buff *msdu, struct hal_tx_status *ts) { } diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index 8baaeeb8cf82..8b790ce72e5d 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -101,8 +101,11 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring) if (!ring->vaddr_unaligned) return; - dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, - ring->paddr_unaligned); + if (ring->cached) + kfree(ring->vaddr_unaligned); + else + dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, + ring->paddr_unaligned); ring->vaddr_unaligned = NULL; } @@ -222,6 +225,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, int entry_sz = ath11k_hal_srng_get_entrysize(ab, type); int max_entries = ath11k_hal_srng_get_max_entries(ab, type); int ret; + bool cached = false; if (max_entries < 0 || entry_sz < 0) return -EINVAL; @@ -230,9 +234,29 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, num_entries = max_entries; ring->size = (num_entries * 
entry_sz) + HAL_RING_BASE_ALIGN - 1; - ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, - &ring->paddr_unaligned, - GFP_KERNEL); + + if (ab->hw_params.alloc_cacheable_memory) { + /* Allocate the reo dst and tx completion rings from cacheable memory */ + switch (type) { + case HAL_REO_DST: + case HAL_WBM2SW_RELEASE: + cached = true; + break; + default: + cached = false; + } + + if (cached) { + ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL); + ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned); + } + } + + if (!cached) + ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, + &ring->paddr_unaligned, + GFP_KERNEL); + if (!ring->vaddr_unaligned) return -ENOMEM; @@ -292,6 +316,11 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, return -EINVAL; } + if (cached) { + params.flags |= HAL_SRNG_FLAGS_CACHED; + ring->cached = 1; + } + ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, ¶ms); if (ret < 0) { ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n", @@ -742,13 +771,12 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, const struct ath11k_hw_hal_params *hal_params; int grp_id = irq_grp->grp_id; int work_done = 0; - int i = 0, j; + int i, j; int tot_work_done = 0; - while (ab->hw_params.ring_mask->tx[grp_id] >> i) { - if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i)) - ath11k_dp_tx_completion_handler(ab, i); - i++; + if (ab->hw_params.ring_mask->tx[grp_id]) { + i = __fls(ab->hw_params.ring_mask->tx[grp_id]); + ath11k_dp_tx_completion_handler(ab, i); } if (ab->hw_params.ring_mask->rx_err[grp_id]) { @@ -1023,6 +1051,7 @@ int ath11k_dp_alloc(struct ath11k_base *ab) INIT_LIST_HEAD(&dp->reo_cmd_list); INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list); + INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list); spin_lock_init(&dp->reo_cmd_lock); dp->reo_cmd_cache_flush_count = 0; diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index 4794ca04f213..409d6cc5a1d5 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -64,6 +64,7 @@ struct dp_srng { dma_addr_t paddr; int size; u32 ring_id; + u8 cached; }; struct dp_rxdma_ring { @@ -88,6 +89,19 @@ struct dp_tx_ring { int tx_status_tail; }; +enum dp_mon_status_buf_state { + /* PPDU id matches in dst ring and status ring */ + DP_MON_STATUS_MATCH, + /* status ring dma is not done */ + DP_MON_STATUS_NO_DMA, + /* status ring is lagging, reap status ring */ + DP_MON_STATUS_LAG, + /* status ring is leading, reap dst ring and drop */ + DP_MON_STATUS_LEAD, + /* replinish monitor status ring */ + DP_MON_STATUS_REPLINISH, +}; + struct ath11k_pdev_mon_stats { u32 status_ppdu_state; u32 status_ppdu_start; @@ -103,6 +117,12 @@ struct ath11k_pdev_mon_stats { u32 dup_mon_buf_cnt; }; +struct dp_full_mon_mpdu { + struct list_head list; + struct sk_buff *head; + struct sk_buff *tail; +}; + struct dp_link_desc_bank { void *vaddr_unaligned; void *vaddr; @@ -134,7 +154,11 @@ struct ath11k_mon_data { u32 mon_last_buf_cookie; u64 mon_last_linkdesc_paddr; u16 chan_noise_floor; - + bool hold_mon_dst_ring; + enum dp_mon_status_buf_state buf_state; + dma_addr_t mon_status_paddr; + struct dp_full_mon_mpdu *mon_mpdu; + struct hal_sw_mon_ring_entries sw_mon_entries; struct ath11k_pdev_mon_stats rx_mon_stats; /* lock for monitor data */ spinlock_t mon_lock; @@ -244,6 +268,7 @@ struct ath11k_dp { struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX]; struct list_head reo_cmd_list; struct list_head reo_cmd_cache_flush_list; + struct 
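[Editor's note] Allocating the REO destination and WBM release rings with kzalloc() (flagged HAL_SRNG_FLAGS_CACHED) above trades the uncached dma_alloc_coherent() mapping for ordinary cacheable memory: the CPU's frequent descriptor reads get much cheaper, but coherency with device DMA becomes software's problem. Conceptually the ring accessors then have to bracket descriptor access with explicit maintenance, along these lines (a sketch of the idea only; the actual synchronization lives in the hal_srng access code, which is not part of this hunk):

#include <linux/dma-mapping.h>

static void dp_srng_sync_cached(struct ath11k_base *ab, struct dp_srng *ring,
				bool for_cpu)
{
	if (for_cpu)	/* before the CPU parses device-written entries */
		dma_sync_single_for_cpu(ab->dev, ring->paddr, ring->size,
					DMA_FROM_DEVICE);
	else		/* before handing the ring back to the device */
		dma_sync_single_for_device(ab->dev, ring->paddr, ring->size,
					   DMA_FROM_DEVICE);
}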
list_head dp_full_mon_mpdu_list; u32 reo_cmd_cache_flush_count; /** * protects access to below fields, @@ -291,6 +316,7 @@ enum htt_h2t_msg_type { HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc, HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11, + HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE = 0x17, }; #define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0) @@ -517,7 +543,8 @@ struct htt_ppdu_stats_cfg_cmd { } __packed; #define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0) -#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8) +#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8) +#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9) #define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16) enum htt_ppdu_stats_tag_type { @@ -955,6 +982,33 @@ struct htt_rx_ring_tlv_filter { u32 pkt_filter_flags3; /* DATA */ }; +#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0) +#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8) + +#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE BIT(0) +#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END BIT(1) +#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2) +#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3) + +/** + * Enumeration for full monitor mode destination ring select + * 0 - REO destination ring select + * 1 - FW destination ring select + * 2 - SW destination ring select + * 3 - Release destination ring select + */ +enum htt_rx_full_mon_release_ring { + HTT_RX_MON_RING_REO, + HTT_RX_MON_RING_FW, + HTT_RX_MON_RING_SW, + HTT_RX_MON_RING_RELEASE, +}; + +struct htt_rx_full_monitor_mode_cfg_cmd { + u32 info0; + u32 cfg; +} __packed; + /* HTT message target->host */ enum htt_t2h_msg_type { diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index c5320847b80a..c212a789421e 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -20,13 +20,15 @@ #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) -static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) +static inline +u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc); } -static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline +enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, + struct hal_rx_desc *desc) { if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc)) return HAL_ENCRYPT_TYPE_OPEN; @@ -34,32 +36,34 @@ static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_bas return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline +u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc); } -static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline +bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return 
ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); } -static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc); } -static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, - struct sk_buff *skb) +static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; @@ -67,8 +71,8 @@ static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, return ieee80211_has_morefrags(hdr->frame_control); } -static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, - struct sk_buff *skb) +static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; @@ -76,37 +80,37 @@ static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; } -static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc); } -static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_attention(desc); } -static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, __le32_to_cpu(attn->info2)); } -static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } -static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) +static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) { return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, __le32_to_cpu(attn->info2)) == @@ -154,68 +158,68 @@ static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab, return errmap & DP_RX_MPDU_ERR_MSDU_LEN; } -static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc); } -static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc); } -static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, - 
struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc); } -static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc); } -static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc); } -static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc)); } -static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc); } -static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc); } -static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc); } -static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, - struct hal_rx_desc *desc) +static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, + struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc); } @@ -233,14 +237,14 @@ static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); } -static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) +static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) { return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, __le32_to_cpu(attn->info1)); } -static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, - struct hal_rx_desc *rx_desc) +static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u8 *rx_pkt_hdr; @@ -249,8 +253,8 @@ static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, return rx_pkt_hdr; } -static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, - struct hal_rx_desc *rx_desc) +static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { u32 tlv_tag; @@ -259,15 +263,15 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, return tlv_tag == HAL_RX_MPDU_START; } -static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, - struct hal_rx_desc *rx_desc) +static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, + struct hal_rx_desc *rx_desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); } -static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, - struct hal_rx_desc *desc, - u16 len) +static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, + struct hal_rx_desc 
*desc, + u16 len) { ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); } @@ -1356,25 +1360,6 @@ int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, return 0; } -static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi) -{ - u32 ret = 0; - - switch (sgi) { - case RX_MSDU_START_SGI_0_8_US: - ret = NL80211_RATE_INFO_HE_GI_0_8; - break; - case RX_MSDU_START_SGI_1_6_US: - ret = NL80211_RATE_INFO_HE_GI_1_6; - break; - case RX_MSDU_START_SGI_3_2_US: - ret = NL80211_RATE_INFO_HE_GI_3_2; - break; - } - - return ret; -} - static void ath11k_update_per_peer_tx_stats(struct ath11k *ar, struct htt_ppdu_stats *ppdu_stats, u8 user) @@ -1493,14 +1478,15 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar, arsta->txrate.mcs = mcs; arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; arsta->txrate.he_dcm = dcm; - arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); - arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc( - (user_rate->ru_end - + arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); + arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc + ((user_rate->ru_end - user_rate->ru_start) + 1); break; } arsta->txrate.nss = nss; + arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); arsta->tx_duration += tx_duration; memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); @@ -2380,7 +2366,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, } rx_status->encoding = RX_ENC_HE; rx_status->nss = nss; - rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); + rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); break; } @@ -2596,36 +2582,30 @@ free_out: static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, struct napi_struct *napi, struct sk_buff_head *msdu_list, - int *quota, int ring_id) + int mac_id) { - struct ath11k_skb_rxcb *rxcb; struct sk_buff *msdu; struct ath11k *ar; struct ieee80211_rx_status rx_status = {0}; - u8 mac_id; int ret; if (skb_queue_empty(msdu_list)) return; - rcu_read_lock(); - - while (*quota && (msdu = __skb_dequeue(msdu_list))) { - rxcb = ATH11K_SKB_RXCB(msdu); - mac_id = rxcb->mac_id; - ar = ab->pdevs[mac_id].ar; - if (!rcu_dereference(ab->pdevs_active[mac_id])) { - dev_kfree_skb_any(msdu); - continue; - } + if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) { + __skb_queue_purge(msdu_list); + return; + } - if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { - dev_kfree_skb_any(msdu); - continue; - } + ar = ab->pdevs[mac_id].ar; + if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) { + __skb_queue_purge(msdu_list); + return; + } + while ((msdu = __skb_dequeue(msdu_list))) { ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); - if (ret) { + if (unlikely(ret)) { ath11k_dbg(ab, ATH11K_DBG_DATA, "Unable to process msdu %d", ret); dev_kfree_skb_any(msdu); @@ -2633,10 +2613,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, } ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); - (*quota)--; } - - rcu_read_unlock(); } int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, @@ -2645,19 +2622,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, struct ath11k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring; int num_buffs_reaped[MAX_RADIOS] = {0}; - struct sk_buff_head msdu_list; + struct sk_buff_head msdu_list[MAX_RADIOS]; struct ath11k_skb_rxcb *rxcb; int total_msdu_reaped = 0; struct hal_srng *srng; struct 
sk_buff *msdu; - int quota = budget; bool done = false; int buf_id, mac_id; struct ath11k *ar; - u32 *rx_desc; + struct hal_reo_dest_ring *desc; + enum hal_reo_dest_ring_push_reason push_reason; + u32 cookie; int i; - __skb_queue_head_init(&msdu_list); + for (i = 0; i < MAX_RADIOS; i++) + __skb_queue_head_init(&msdu_list[i]); srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; @@ -2666,13 +2645,11 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, ath11k_hal_srng_access_begin(ab, srng); try_again: - while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { - struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; - enum hal_reo_dest_ring_push_reason push_reason; - u32 cookie; - + while (likely(desc = + (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, + srng))) { cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, - desc.buf_addr_info.info1); + desc->buf_addr_info.info1); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); @@ -2681,7 +2658,7 @@ try_again: rx_ring = &ar->dp.rx_refill_buf_ring; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); - if (!msdu) { + if (unlikely(!msdu)) { ath11k_warn(ab, "frame rx with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); @@ -2697,37 +2674,41 @@ try_again: DMA_FROM_DEVICE); num_buffs_reaped[mac_id]++; - total_msdu_reaped++; push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, - desc.info0); - if (push_reason != - HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { + desc->info0); + if (unlikely(push_reason != + HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { dev_kfree_skb_any(msdu); ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; continue; } - rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & + rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & + rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & + rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, - desc.rx_mpdu_info.meta_data); + desc->rx_mpdu_info.meta_data); rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, - desc.rx_mpdu_info.info0); + desc->rx_mpdu_info.info0); rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, - desc.info0); + desc->info0); rxcb->mac_id = mac_id; - __skb_queue_tail(&msdu_list, msdu); + __skb_queue_tail(&msdu_list[mac_id], msdu); - if (total_msdu_reaped >= quota && !rxcb->is_continuation) { + if (rxcb->is_continuation) { + done = false; + } else { + total_msdu_reaped++; done = true; - break; } + + if (total_msdu_reaped >= budget) + break; } /* Hw might have updated the head pointer after we cached it. @@ -2736,7 +2717,7 @@ try_again: * head pointer so that we can reap complete MPDU in the current * rx processing. 
*/ - if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { + if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) { ath11k_hal_srng_access_end(ab, srng); goto try_again; } @@ -2745,25 +2726,23 @@ try_again: spin_unlock_bh(&srng->lock); - if (!total_msdu_reaped) + if (unlikely(!total_msdu_reaped)) goto exit; for (i = 0; i < ab->num_radios; i++) { if (!num_buffs_reaped[i]) continue; + ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i); + ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } - - ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, - &quota, ring_id); - exit: - return budget - quota; + return total_msdu_reaped; } static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, @@ -2771,6 +2750,7 @@ { struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; u32 num_msdu; + int i; if (!rx_stats) return; @@ -2832,6 +2812,13 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; arsta->rssi_comb = ppdu_info->rssi_comb; + + BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > + ARRAY_SIZE(ppdu_info->rssi_chain_pri20)); + + for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++) + arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i]; + rx_stats->rx_duration += ppdu_info->rx_duration; arsta->rx_duration = rx_stats->rx_duration; } @@ -2945,6 +2932,43 @@ fail_desc_get: return req_entries - num_remain; } +#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535 + +static void +ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon, + struct hal_tlv_hdr *tlv) +{ + struct hal_rx_ppdu_start *ppdu_start; + u16 ppdu_id_diff, ppdu_id, tlv_len; + u8 *ptr; + + /* PPDU id is part of second tlv, move ptr to second tlv */ + tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl); + ptr = (u8 *)tlv; + ptr += sizeof(*tlv) + tlv_len; + tlv = (struct hal_tlv_hdr *)ptr; + + if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START) + return; + + ptr += sizeof(*tlv); + ppdu_start = (struct hal_rx_ppdu_start *)ptr; + ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID, + __le32_to_cpu(ppdu_start->info0)); + + if (pmon->sw_mon_entries.ppdu_id < ppdu_id) { + pmon->buf_state = DP_MON_STATUS_LEAD; + ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id; + if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) + pmon->buf_state = DP_MON_STATUS_LAG; + } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) { + pmon->buf_state = DP_MON_STATUS_LAG; + ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id; + if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) + pmon->buf_state = DP_MON_STATUS_LEAD; + } +} + static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, int *budget, struct sk_buff_head *skb_list) { struct ath11k *ar; @@ -2952,6 +2976,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, const struct ath11k_hw_hal_params *hal_params; struct ath11k_pdev_dp *dp; struct dp_rxdma_ring *rx_ring; + struct ath11k_mon_data *pmon; struct hal_srng *srng; void *rx_mon_status_desc; struct sk_buff *skb; @@ -2965,6 +2990,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; dp = &ar->dp; + pmon = &dp->mon_data; srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); rx_ring = 
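/* ath11k_dp_rx_mon_update_status_buf_state() above orders 16-bit PPDU
 * ids that wrap around, which is why a raw difference larger than
 * ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP flips the lead/lag verdict. One
 * equivalent way to express the same circular comparison, for unequal
 * ids and assuming the same threshold:
 *
 *	static bool ppdu_id_is_ahead(u16 a, u16 b)
 *	{
 *		return (u16)(a - b) < ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP;
 *	}
 *
 * With this the status ring "leads" when its id is ahead of the
 * destination ring id on the circle, and "lags" otherwise.
 */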
&dp->rx_mon_status_refill_ring[srng_id]; @@ -2977,8 +3003,10 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, *budget -= 1; rx_mon_status_desc = ath11k_hal_srng_src_peek(ab, srng); - if (!rx_mon_status_desc) + if (!rx_mon_status_desc) { + pmon->buf_state = DP_MON_STATUS_REPLINISH; break; + } ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, &cookie, &rbm); @@ -2991,6 +3019,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); + pmon->buf_state = DP_MON_STATUS_REPLINISH; goto move_next; } @@ -3010,10 +3039,18 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl)); dev_kfree_skb_any(skb); + pmon->buf_state = DP_MON_STATUS_NO_DMA; goto move_next; } + if (ab->hw_params.full_monitor_mode) { + ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv); + if (paddr == pmon->mon_status_paddr) + pmon->buf_state = DP_MON_STATUS_MATCH; + } __skb_queue_tail(skb_list, skb); + } else { + pmon->buf_state = DP_MON_STATUS_REPLINISH; } move_next: skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, @@ -3064,10 +3101,10 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, if (!num_buffs_reaped) goto exit; - while ((skb = __skb_dequeue(&skb_list))) { - memset(&ppdu_info, 0, sizeof(ppdu_info)); - ppdu_info.peer_id = HAL_INVALID_PEERID; + memset(&ppdu_info, 0, sizeof(ppdu_info)); + ppdu_info.peer_id = HAL_INVALID_PEERID; + while ((skb = __skb_dequeue(&skb_list))) { if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { log_type = ATH11K_PKTLOG_TYPE_LITE_RX; rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; @@ -3095,10 +3132,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, ath11k_dbg(ab, ATH11K_DBG_DATA, "failed to find the peer with peer_id %d\n", ppdu_info.peer_id); - spin_unlock_bh(&ab->base_lock); - rcu_read_unlock(); - dev_kfree_skb_any(skb); - continue; + goto next_skb; } arsta = (struct ath11k_sta *)peer->sta->drv_priv; @@ -3107,10 +3141,13 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); +next_skb: spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); dev_kfree_skb_any(skb); + memset(&ppdu_info, 0, sizeof(ppdu_info)); + ppdu_info.peer_id = HAL_INVALID_PEERID; } exit: return num_buffs_reaped; @@ -3800,7 +3837,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && - rbm != ab->hw_params.hal_params->rx_buf_rbm) { + rbm != HAL_RX_BUF_RBM_SW3_BM) { ab->soc_stats.invalid_rbm++; ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); ath11k_dp_rx_link_desc_return(ab, desc, @@ -4829,7 +4866,7 @@ static struct sk_buff * ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u32 mac_id, struct sk_buff *head_msdu, struct sk_buff *last_msdu, - struct ieee80211_rx_status *rxs) + struct ieee80211_rx_status *rxs, bool *fcs_err) { struct ath11k_base *ab = ar->ab; struct sk_buff *msdu, *prev_buf; @@ -4839,12 +4876,17 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u8 *dest, decap_format; struct ieee80211_hdr_3addr *wh; struct rx_attention *rx_attention; + u32 err_bitmap; if (!head_msdu) goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; 
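/* The attention TLV fetched from this head MSDU carries the MPDU error
 * bitmap for the whole aggregate. Rather than dropping frames with a
 * bad checksum, the monitor path only records the failure; in effect:
 *
 *	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
 *		rxs->flag = RX_FLAG_FAILED_FCS_CRC;
 *
 * so mac80211 can drop the frame or, for monitor interfaces that asked
 * for fcsfail frames, still deliver it.
 */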
rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); + err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); + + if (err_bitmap & DP_RX_MPDU_ERR_FCS) + *fcs_err = true; if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) return NULL; @@ -4933,9 +4975,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, struct ath11k_pdev_dp *dp = &ar->dp; struct sk_buff *mon_skb, *skb_next, *header; struct ieee80211_rx_status *rxs = &dp->rx_status; + bool fcs_err = false; mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, - tail_msdu, rxs); + tail_msdu, rxs, &fcs_err); if (!mon_skb) goto mon_deliver_fail; @@ -4943,6 +4986,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, header = mon_skb; rxs->flag = 0; + + if (fcs_err) + rxs->flag = RX_FLAG_FAILED_FCS_CRC; + do { skb_next = mon_skb->next; if (!skb_next) @@ -5091,6 +5138,357 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, } } +static u32 +ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar, + void *ring_entry, struct sk_buff **head_msdu, + struct sk_buff **tail_msdu, + struct hal_sw_mon_ring_entries *sw_mon_entries) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct ath11k_mon_data *pmon = &dp->mon_data; + struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; + struct sk_buff *msdu = NULL, *last = NULL; + struct hal_sw_monitor_ring *sw_desc = ring_entry; + struct hal_rx_msdu_list msdu_list; + struct hal_rx_desc *rx_desc; + struct ath11k_skb_rxcb *rxcb; + void *rx_msdu_link_desc; + void *p_buf_addr_info, *p_last_buf_addr_info; + int buf_id, i = 0; + u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset; + u32 rx_bufs_used = 0, msdu_cnt = 0; + u32 total_len = 0, frag_len = 0, sw_cookie; + u16 num_msdus = 0; + u8 rxdma_err, rbm; + bool is_frag, is_first_msdu; + bool drop_mpdu = false; + + ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries); + + sw_cookie = sw_mon_entries->mon_dst_sw_cookie; + sw_mon_entries->end_of_ppdu = false; + sw_mon_entries->drop_ppdu = false; + p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info; + msdu_cnt = sw_mon_entries->msdu_cnt; + + sw_mon_entries->end_of_ppdu = + FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0); + if (sw_mon_entries->end_of_ppdu) + return rx_bufs_used; + + if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON, + sw_desc->info0) == + HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { + rxdma_err = + FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE, + sw_desc->info0); + if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || + rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || + rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { + pmon->rx_mon_stats.dest_mpdu_drop++; + drop_mpdu = true; + } + } + + is_frag = false; + is_first_msdu = true; + + do { + rx_msdu_link_desc = + (u8 *)pmon->link_desc_banks[sw_cookie].vaddr + + (sw_mon_entries->mon_dst_paddr - + pmon->link_desc_banks[sw_cookie].paddr); + + ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, + &num_msdus); + + for (i = 0; i < num_msdus; i++) { + buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, + msdu_list.sw_cookie[i]); + + spin_lock_bh(&rx_ring->idr_lock); + msdu = idr_find(&rx_ring->bufs_idr, buf_id); + if (!msdu) { + ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + "full mon msdu_pop: invalid buf_id %d\n", + buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + break; + } + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + rxcb = ATH11K_SKB_RXCB(msdu); + if (!rxcb->unmapped) { + 
dma_unmap_single(ar->ab->dev, rxcb->paddr, + msdu->len + + skb_tailroom(msdu), + DMA_FROM_DEVICE); + rxcb->unmapped = 1; + } + if (drop_mpdu) { + ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + "full mon: i %d drop msdu %p *ppdu_id %x\n", + i, msdu, sw_mon_entries->ppdu_id); + dev_kfree_skb_any(msdu); + msdu_cnt--; + goto next_msdu; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + + rx_pkt_offset = sizeof(struct hal_rx_desc); + l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); + + if (is_first_msdu) { + if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { + drop_mpdu = true; + dev_kfree_skb_any(msdu); + msdu = NULL; + goto next_msdu; + } + is_first_msdu = false; + } + + ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], + &is_frag, &total_len, + &frag_len, &msdu_cnt); + + rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; + + ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); + + if (!(*head_msdu)) + *head_msdu = msdu; + else if (last) + last->next = msdu; + + last = msdu; +next_msdu: + rx_bufs_used++; + } + + ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, + &sw_mon_entries->mon_dst_paddr, + &sw_mon_entries->mon_dst_sw_cookie, + &rbm, + &p_buf_addr_info); + + if (ath11k_dp_rx_monitor_link_desc_return(ar, + p_last_buf_addr_info, + dp->mac_id)) + ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + "full mon: dp_rx_monitor_link_desc_return failed\n"); + + p_last_buf_addr_info = p_buf_addr_info; + + } while (sw_mon_entries->mon_dst_paddr && msdu_cnt); + + if (last) + last->next = NULL; + + *tail_msdu = msdu; + + return rx_bufs_used; +} + +static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp, + struct dp_full_mon_mpdu *mon_mpdu, + struct sk_buff *head, + struct sk_buff *tail) +{ + mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); + if (!mon_mpdu) + return -ENOMEM; + + list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list); + mon_mpdu->head = head; + mon_mpdu->tail = tail; + + return 0; +} + +static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp, + struct dp_full_mon_mpdu *mon_mpdu) +{ + struct dp_full_mon_mpdu *tmp; + struct sk_buff *tmp_msdu, *skb_next; + + if (list_empty(&dp->dp_full_mon_mpdu_list)) + return; + + list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { + list_del(&mon_mpdu->list); + + tmp_msdu = mon_mpdu->head; + while (tmp_msdu) { + skb_next = tmp_msdu->next; + dev_kfree_skb_any(tmp_msdu); + tmp_msdu = skb_next; + } + + kfree(mon_mpdu); + } +} + +static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar, + int mac_id, + struct ath11k_mon_data *pmon, + struct napi_struct *napi) +{ + struct ath11k_pdev_mon_stats *rx_mon_stats; + struct dp_full_mon_mpdu *tmp; + struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu; + struct sk_buff *head_msdu, *tail_msdu; + struct ath11k_base *ab = ar->ab; + struct ath11k_dp *dp = &ab->dp; + int ret; + + rx_mon_stats = &pmon->rx_mon_stats; + + list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { + list_del(&mon_mpdu->list); + head_msdu = mon_mpdu->head; + tail_msdu = mon_mpdu->tail; + if (head_msdu && tail_msdu) { + ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu, + tail_msdu, napi); + rx_mon_stats->dest_mpdu_done++; + ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n"); + } + kfree(mon_mpdu); + } + + return ret; +} + +static int +ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, int budget) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + struct ath11k_pdev_dp *dp = &ar->dp; + struct 
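/* The helpers above accumulate one PPDU worth of MPDUs on
 * dp_full_mon_mpdu_list and then consume the list as a unit. The drain
 * pattern shared by the drop and deliver paths is the usual safe walk:
 *
 *	struct dp_full_mon_mpdu *mpdu, *tmp;
 *
 *	list_for_each_entry_safe(mpdu, tmp,
 *				 &dp->dp_full_mon_mpdu_list, list) {
 *		list_del(&mpdu->list);
 *		(deliver or free the mpdu->head .. mpdu->tail skb chain)
 *		kfree(mpdu);
 *	}
 */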
ath11k_mon_data *pmon = &dp->mon_data; + struct hal_sw_mon_ring_entries *sw_mon_entries; + int quota = 0, work = 0, count; + + sw_mon_entries = &pmon->sw_mon_entries; + + while (pmon->hold_mon_dst_ring) { + quota = ath11k_dp_rx_process_mon_status(ab, mac_id, + napi, 1); + if (pmon->buf_state == DP_MON_STATUS_MATCH) { + count = sw_mon_entries->status_buf_count; + if (count > 1) { + quota += ath11k_dp_rx_process_mon_status(ab, mac_id, + napi, count); + } + + ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id, + pmon, napi); + pmon->hold_mon_dst_ring = false; + } else if (!pmon->mon_status_paddr || + pmon->buf_state == DP_MON_STATUS_LEAD) { + sw_mon_entries->drop_ppdu = true; + pmon->hold_mon_dst_ring = false; + } + + if (!quota) + break; + + work += quota; + } + + if (sw_mon_entries->drop_ppdu) + ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu); + + return work; +} + +static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, int budget) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + struct ath11k_pdev_dp *dp = &ar->dp; + struct ath11k_mon_data *pmon = &dp->mon_data; + struct hal_sw_mon_ring_entries *sw_mon_entries; + struct ath11k_pdev_mon_stats *rx_mon_stats; + struct sk_buff *head_msdu, *tail_msdu; + void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; + void *ring_entry; + u32 rx_bufs_used = 0, mpdu_rx_bufs_used; + int quota = 0, ret; + bool break_dst_ring = false; + + spin_lock_bh(&pmon->mon_lock); + + sw_mon_entries = &pmon->sw_mon_entries; + rx_mon_stats = &pmon->rx_mon_stats; + + if (pmon->hold_mon_dst_ring) { + spin_unlock_bh(&pmon->mon_lock); + goto reap_status_ring; + } + + ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); + while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { + head_msdu = NULL; + tail_msdu = NULL; + + mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry, + &head_msdu, + &tail_msdu, + sw_mon_entries); + rx_bufs_used += mpdu_rx_bufs_used; + + if (!sw_mon_entries->end_of_ppdu) { + if (head_msdu) { + ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp, + pmon->mon_mpdu, + head_msdu, + tail_msdu); + if (ret) + break_dst_ring = true; + } + + goto next_entry; + } else { + if (!sw_mon_entries->ppdu_id && + !sw_mon_entries->mon_status_paddr) { + break_dst_ring = true; + goto next_entry; + } + } + + rx_mon_stats->dest_ppdu_done++; + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + pmon->buf_state = DP_MON_STATUS_LAG; + pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr; + pmon->hold_mon_dst_ring = true; +next_entry: + ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, + mon_dst_srng); + if (break_dst_ring) + break; + } + + ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); + spin_unlock_bh(&pmon->mon_lock); + + if (rx_bufs_used) { + ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, + &dp->rxdma_mon_buf_ring, + rx_bufs_used, + HAL_RX_BUF_RBM_SW3_BM); + } + +reap_status_ring: + quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id, + napi, budget); + + return quota; +} + static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { @@ -5113,10 +5511,14 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); int ret = 0; - if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) + if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && + ab->hw_params.full_monitor_mode) + ret = ath11k_dp_full_mon_process_rx(ab, mac_id, 
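/* The two functions above implement a handshake between the monitor
 * destination ring and the status ring: on an end-of-ppdu destination
 * entry the status buffer address is latched in pmon->mon_status_paddr
 * and hold_mon_dst_ring is set; the status ring is then reaped one
 * buffer at a time until the states resolve, as in the loop above:
 *
 *	if (pmon->buf_state == DP_MON_STATUS_MATCH) {
 *		(deliver the held PPDU)
 *		pmon->hold_mon_dst_ring = false;
 *	} else if (!pmon->mon_status_paddr ||
 *		   pmon->buf_state == DP_MON_STATUS_LEAD) {
 *		sw_mon_entries->drop_ppdu = true;
 *		pmon->hold_mon_dst_ring = false;
 *	}
 *
 * Only once the hold is released does destination-ring reaping resume.
 */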
napi, budget); + else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); else ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); + return ret; } diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 879fb2a9dc0c..91d6244b6543 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -9,6 +9,7 @@ #include "debugfs_sta.h" #include "hw.h" #include "peer.h" +#include "mac.h" static enum hal_tcl_encap_type ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb) @@ -95,11 +96,11 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, u8 ring_selector = 0, ring_map = 0; bool tcl_ring_retry; - if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) + if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))) return -ESHUTDOWN; - if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && - !ieee80211_is_data(hdr->frame_control)) + if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && + !ieee80211_is_data(hdr->frame_control))) return -ENOTSUPP; pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1); @@ -127,7 +128,7 @@ tcl_ring_sel: DP_TX_IDR_SIZE - 1, GFP_ATOMIC); spin_unlock_bh(&tx_ring->tx_idr_lock); - if (ret < 0) { + if (unlikely(ret < 0)) { if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); return -ENOSPC; @@ -152,7 +153,7 @@ tcl_ring_sel: ti.meta_data_flags = arvif->tcl_metadata; } - if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) { + if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) { if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) { ti.encrypt_type = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); @@ -173,8 +174,8 @@ tcl_ring_sel: ti.bss_ast_idx = arvif->ast_idx; ti.dscp_tid_tbl_idx = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL && - ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) { + if (likely(skb->ip_summed == CHECKSUM_PARTIAL && + ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) { ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) | @@ -211,7 +212,7 @@ tcl_ring_sel: } ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(ab->dev, ti.paddr)) { + if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); ath11k_warn(ab, "failed to DMA map data Tx buffer\n"); ret = -ENOMEM; @@ -231,7 +232,7 @@ tcl_ring_sel: ath11k_hal_srng_access_begin(ab, tcl_ring); hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring); - if (!hal_tcl_desc) { + if (unlikely(!hal_tcl_desc)) { /* NOTE: It is highly unlikely we'll be running out of tcl_ring * desc because the desc is directly enqueued onto hw queue. */ @@ -245,7 +246,7 @@ tcl_ring_sel: * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. 
*/ - if (ring_map != (BIT(ab->hw_params.max_tx_ring) - 1) && + if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) && ab->hw_params.max_tx_ring > 1) { tcl_ring_retry = true; ring_selector++; @@ -293,20 +294,18 @@ static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id, struct sk_buff *msdu; struct ath11k_skb_cb *skb_cb; - spin_lock_bh(&tx_ring->tx_idr_lock); - msdu = idr_find(&tx_ring->txbuf_idr, msdu_id); - if (!msdu) { + spin_lock(&tx_ring->tx_idr_lock); + msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id); + spin_unlock(&tx_ring->tx_idr_lock); + + if (unlikely(!msdu)) { ath11k_warn(ab, "tx completion for unknown msdu_id %d\n", msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); return; } skb_cb = ATH11K_SKB_CB(msdu); - idr_remove(&tx_ring->txbuf_idr, msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); - dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); dev_kfree_skb_any(msdu); @@ -325,12 +324,13 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, struct ath11k_skb_cb *skb_cb; struct ath11k *ar; - spin_lock_bh(&tx_ring->tx_idr_lock); - msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id); - if (!msdu) { + spin_lock(&tx_ring->tx_idr_lock); + msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id); + spin_unlock(&tx_ring->tx_idr_lock); + + if (unlikely(!msdu)) { ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n", ts->msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); return; } @@ -339,9 +339,6 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, ar = skb_cb->ar; - idr_remove(&tx_ring->txbuf_idr, ts->msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); - if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); @@ -418,6 +415,105 @@ static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar, } } +void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats; + enum hal_tx_rate_stats_pkt_type pkt_type; + enum hal_tx_rate_stats_sgi sgi; + enum hal_tx_rate_stats_bw bw; + struct ath11k_peer *peer; + struct ath11k_sta *arsta; + struct ieee80211_sta *sta; + u16 rate, ru_tones; + u8 mcs, rate_idx, ofdma; + int ret; + + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_id(ab, ts->peer_id); + if (!peer || !peer->sta) { + ath11k_dbg(ab, ATH11K_DBG_DP_TX, + "failed to find the peer by id %u\n", ts->peer_id); + goto err_out; + } + + sta = peer->sta; + arsta = (struct ath11k_sta *)sta->drv_priv; + + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, + ts->rate_stats); + mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, + ts->rate_stats); + sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI, + ts->rate_stats); + bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats); + ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats); + ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats); + + /* This is to prefer choose the real NSS value arsta->last_txrate.nss, + * if it is invalid, then choose the NSS value while assoc. 
+ */ + if (arsta->last_txrate.nss) + arsta->txrate.nss = arsta->last_txrate.nss; + else + arsta->txrate.nss = arsta->peer_nss; + + if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A || + pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) { + ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, + pkt_type, + &rate_idx, + &rate); + if (ret < 0) + goto err_out; + arsta->txrate.legacy = rate; + } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) { + if (mcs > 7) { + ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs); + goto err_out; + } + + if (arsta->txrate.nss != 0) + arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1); + arsta->txrate.flags = RATE_INFO_FLAGS_MCS; + if (sgi) + arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) { + if (mcs > 9) { + ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs); + goto err_out; + } + + arsta->txrate.mcs = mcs; + arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; + if (sgi) + arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) { + if (mcs > 11) { + ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs); + goto err_out; + } + + arsta->txrate.mcs = mcs; + arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; + arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); + } + + arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); + if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) { + arsta->txrate.bw = RATE_INFO_BW_HE_RU; + arsta->txrate.he_ru_alloc = + ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); + } + + if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) + ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); + +err_out: + spin_unlock_bh(&ab->base_lock); +} + static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, struct sk_buff *msdu, struct hal_tx_status *ts) @@ -435,16 +531,14 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); - rcu_read_lock(); - - if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) { + if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) { dev_kfree_skb_any(msdu); - goto exit; + return; } - if (!skb_cb->vif) { + if (unlikely(!skb_cb->vif)) { dev_kfree_skb_any(msdu); - goto exit; + return; } info = IEEE80211_SKB_CB(msdu); @@ -465,7 +559,8 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, (info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; - if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { + if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) || + ab->hw_params.single_pdev_only) { if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) { if (ar->last_ppdu_id == 0) { ar->last_ppdu_id = ts->ppdu_id; @@ -473,12 +568,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, ar->cached_ppdu_id == ar->last_ppdu_id) { ar->cached_ppdu_id = ar->last_ppdu_id; ar->cached_stats.is_ampdu = true; - ath11k_debugfs_sta_update_txcompl(ar, msdu, ts); + ath11k_dp_tx_update_txcompl(ar, ts); memset(&ar->cached_stats, 0, sizeof(struct ath11k_per_peer_tx_stats)); } else { ar->cached_stats.is_ampdu = false; - ath11k_debugfs_sta_update_txcompl(ar, msdu, ts); + ath11k_dp_tx_update_txcompl(ar, ts); memset(&ar->cached_stats, 0, sizeof(struct ath11k_per_peer_tx_stats)); } @@ -494,9 +589,6 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, */ ieee80211_tx_status(ar->hw, msdu); - -exit: - rcu_read_unlock(); } static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab, @@ -505,11 +597,11 @@ 
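/* ath11k_dp_tx_update_txcompl() above unpacks the single rate_stats
 * word with FIELD_GET() against GENMASK()-defined fields, e.g.:
 *
 *	#include <linux/bitfield.h>
 *
 *	u8 mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, ts->rate_stats);
 *	u8 sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI, ts->rate_stats);
 *
 * FIELD_GET() derives the shift from the mask at compile time, so the
 * field definition stays the single source of truth.
 */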
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab, { ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0); - if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && - ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM) + if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && + ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) return; - if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) + if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) return; ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON, @@ -556,8 +648,9 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head); } - if ((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) && - (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) { + if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) && + (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == + tx_ring->tx_status_tail))) { /* TODO: Process pending tx_status messages when kfifo_is_full() */ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n"); } @@ -580,7 +673,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id); msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id); - if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) { + if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) { ath11k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status, mac_id, msdu_id, @@ -588,16 +681,16 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) continue; } - spin_lock_bh(&tx_ring->tx_idr_lock); - msdu = idr_find(&tx_ring->txbuf_idr, msdu_id); - if (!msdu) { + spin_lock(&tx_ring->tx_idr_lock); + msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id); + if (unlikely(!msdu)) { ath11k_warn(ab, "tx completion for unknown msdu_id %d\n", msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); + spin_unlock(&tx_ring->tx_idr_lock); continue; } - idr_remove(&tx_ring->txbuf_idr, msdu_id); - spin_unlock_bh(&tx_ring->tx_idr_lock); + + spin_unlock(&tx_ring->tx_idr_lock); ar = ab->pdevs[mac_id].ar; @@ -903,7 +996,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); - pdev_mask = 1 << (i + 1); + pdev_mask = 1 << (ar->pdev_idx + i); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask); @@ -993,6 +1086,7 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type, struct ath11k_dp *dp = &ab->dp; struct sk_buff *skb; struct htt_ext_stats_cfg_cmd *cmd; + u32 pdev_id; int len = sizeof(*cmd); int ret; @@ -1006,7 +1100,12 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type, memset(cmd, 0, sizeof(*cmd)); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG; - cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id; + if (ab->hw_params.single_pdev_only) + pdev_id = ath11k_mac_get_target_pdev_id(ar); + else + pdev_id = ar->pdev->pdev_id; + + cmd->hdr.pdev_mask = 1 << pdev_id; cmd->hdr.stats_type = type; cmd->cfg_param0 = cfg_params->cfg0; @@ -1034,6 +1133,15 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) struct htt_rx_ring_tlv_filter tlv_filter = {0}; int ret = 0, ring_id = 0, i; + if (ab->hw_params.full_monitor_mode) { + ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab, + dp->mac_id, !reset); + if (ret < 
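/* Note the pdev addressing fix above: HTT_PPDU_STATS_CFG_PDEV_ID now
 * sits in bits 15:9 (bit 8 became HTT_PPDU_STATS_CFG_SOC_STATS), and
 * the mask is derived from the radio's own index:
 *
 *	pdev_mask = 1 << (ar->pdev_idx + i);
 *	cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
 *
 * so the request targets the radio's actual pdev instead of a fixed
 * index.
 */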
0) { + ath11k_err(ab, "failed to setup full monitor %d\n", ret); + return ret; + } + } + ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; if (!reset) { @@ -1099,3 +1207,42 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) return ret; } + +int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id, + bool config) +{ + struct htt_rx_full_monitor_mode_cfg_cmd *cmd; + struct sk_buff *skb; + int ret, len = sizeof(*cmd); + + skb = ath11k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data; + memset(cmd, 0, sizeof(*cmd)); + cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE, + HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE); + + cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id); + + cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE | + FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING, + HTT_RX_MON_RING_SW); + if (config) { + cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END | + HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END; + } + + ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb); + if (ret) + goto err_free; + + return 0; + +err_free: + dev_kfree_skb_any(skb); + + return ret; +} diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h index 698b907b878d..e87d65bfbf06 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.h +++ b/drivers/net/wireless/ath/ath11k/dp_tx.h @@ -15,6 +15,7 @@ struct ath11k_dp_htt_wbm_tx_status { int ack_rssi; }; +void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts); int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab); int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, struct ath11k_sta *arsta, struct sk_buff *skb); @@ -37,4 +38,6 @@ int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id, int rx_buf_size, struct htt_rx_ring_tlv_filter *tlv_filter); +int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id, + bool config); #endif diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index eaa0edca5576..2ec09ae90080 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -627,6 +627,21 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng) return NULL; } +static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab, + struct hal_srng *srng) +{ + u32 *desc; + + /* prefetch only if desc is available */ + desc = ath11k_hal_srng_dst_peek(ab, srng); + if (likely(desc)) { + dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc), + (srng->entry_size * sizeof(u32)), + DMA_FROM_DEVICE); + prefetch(desc); + } +} + u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng) { @@ -639,8 +654,15 @@ u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, desc = srng->ring_base_vaddr + srng->u.dst_ring.tp; - srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) % - srng->ring_size; + srng->u.dst_ring.tp += srng->entry_size; + + /* wrap around to start of ring*/ + if (srng->u.dst_ring.tp == srng->ring_size) + srng->u.dst_ring.tp = 0; + + /* Try to prefetch the next descriptor in the ring */ + if (srng->flags & HAL_SRNG_FLAGS_CACHED) + ath11k_hal_srng_prefetch_desc(ab, srng); return desc; } @@ -775,11 +797,16 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); - if 
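/* Two hot-path tweaks above: the tail-pointer advance trades the
 * modulo for a compare-and-reset, avoiding a division per descriptor:
 *
 *	srng->u.dst_ring.tp += srng->entry_size;
 *	if (srng->u.dst_ring.tp == srng->ring_size)
 *		srng->u.dst_ring.tp = 0;
 *
 * and, on HAL_SRNG_FLAGS_CACHED rings, the next descriptor is synced
 * for the CPU and prefetch()ed while the current one is still being
 * processed.
 */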
(srng->ring_dir == HAL_SRNG_DIR_SRC) + if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; - else + } else { srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; + + /* Try to prefetch the next descriptor in the ring */ + if (srng->flags & HAL_SRNG_FLAGS_CACHED) + ath11k_hal_srng_prefetch_desc(ab, srng); + } } /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin() @@ -947,6 +974,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, srng->msi_data = params->msi_data; srng->initialized = 1; spin_lock_init(&srng->lock); + lockdep_set_class(&srng->lock, hal->srng_key + ring_id); for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) { srng->hwreg_base[i] = srng_config->reg_start[i] + @@ -1233,6 +1261,24 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) return 0; } +static void ath11k_hal_register_srng_key(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_register_key(hal->srng_key + ring_id); +} + +static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_unregister_key(hal->srng_key + ring_id); +} + int ath11k_hal_srng_init(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; @@ -1252,6 +1298,8 @@ int ath11k_hal_srng_init(struct ath11k_base *ab) if (ret) goto err_free_cont_rdp; + ath11k_hal_register_srng_key(ab); + return 0; err_free_cont_rdp: @@ -1266,6 +1314,7 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; + ath11k_hal_unregister_srng_key(ab); ath11k_hal_free_cont_rdp(ab); ath11k_hal_free_cont_wrp(ab); kfree(hal->srng_config); diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 35ed3a14e200..a7d9b4c551ad 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -513,6 +513,7 @@ enum hal_srng_dir { #define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020 #define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000 #define HAL_SRNG_FLAGS_MSI_INTR 0x00020000 +#define HAL_SRNG_FLAGS_CACHED 0x20000000 #define HAL_SRNG_FLAGS_LMAC_RING 0x80000000 #define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1) @@ -901,6 +902,8 @@ struct ath11k_hal { /* shadow register configuration */ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS]; int num_shadow_reg_configured; + + struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX]; }; u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h index 00b595b84939..406767672844 100644 --- a/drivers/net/wireless/ath/ath11k/hal_desc.h +++ b/drivers/net/wireless/ath/ath11k/hal_desc.h @@ -858,6 +858,25 @@ struct hal_reo_entrance_ring { * this ring has looped around the ring. 
*/ +#define HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON GENMASK(1, 0) +#define HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2) +#define HAL_SW_MON_RING_INFO0_MPDU_FRAG_NUMBER GENMASK(10, 7) +#define HAL_SW_MON_RING_INFO0_FRAMELESS_BAR BIT(11) +#define HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT GENMASK(15, 12) +#define HAL_SW_MON_RING_INFO0_END_OF_PPDU BIT(16) + +#define HAL_SW_MON_RING_INFO1_PHY_PPDU_ID GENMASK(15, 0) +#define HAL_SW_MON_RING_INFO1_RING_ID GENMASK(27, 20) +#define HAL_SW_MON_RING_INFO1_LOOPING_COUNT GENMASK(31, 28) + +struct hal_sw_monitor_ring { + struct ath11k_buffer_addr buf_addr_info; + struct rx_mpdu_desc rx_mpdu_info; + struct ath11k_buffer_addr status_buf_addr_info; + u32 info0; + u32 info1; +} __packed; + #define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0) #define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16) diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c index 329c404cfa80..a3b353a4b5f7 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.c +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c @@ -29,8 +29,7 @@ static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc)); desc = (struct hal_reo_get_queue_stats *)tlv->value; - memset(&desc->queue_addr_lo, 0, - (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr))); + memset_startat(desc, 0, queue_addr_lo); desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) @@ -62,8 +61,7 @@ static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc)); desc = (struct hal_reo_flush_cache *)tlv->value; - memset(&desc->cache_addr_lo, 0, - (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr))); + memset_startat(desc, 0, cache_addr_lo); desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) @@ -101,8 +99,7 @@ static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv, FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc)); desc = (struct hal_reo_update_rx_queue *)tlv->value; - memset(&desc->queue_addr_lo, 0, - (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr))); + memset_startat(desc, 0, queue_addr_lo); desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) @@ -374,7 +371,7 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc, ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, wbm_desc->buf_addr_info.info1); - if (ret_buf_mgr != ab->hw_params.hal_params->rx_buf_rbm) { + if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) { ab->soc_stats.invalid_rbm++; return -EINVAL; } @@ -764,15 +761,17 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size, * size changes and also send WMI message to FW to change the REO * queue descriptor in Rx peer entry as part of dp_rx_tid_update. 
*/ - memset(ext_desc, 0, 3 * sizeof(*ext_desc)); + memset(ext_desc, 0, sizeof(*ext_desc)); ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1); ext_desc++; + memset(ext_desc, 0, sizeof(*ext_desc)); ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2); ext_desc++; + memset(ext_desc, 0, sizeof(*ext_desc)); ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3); @@ -1039,7 +1038,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab, ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION, info0); - ppdu_info->ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); + ppdu_info->ru_alloc = + ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones); + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; break; } @@ -1080,6 +1081,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab, break; } case HAL_PHYRX_RSSI_LEGACY: { + int i; + bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map); struct hal_rx_phyrx_rssi_legacy_info *rssi = (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data; @@ -1090,6 +1094,14 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab, ppdu_info->rssi_comb = FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB, __le32_to_cpu(rssi->info0)); + + if (db2dbm) { + for (i = 0; i < ARRAY_SIZE(rssi->preamble); i++) { + ppdu_info->rssi_chain_pri20[i] = + le32_get_bits(rssi->preamble[i].rssi_2040, + HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20); + } + } break; } case HAL_RX_MPDU_START: { @@ -1186,3 +1198,47 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, *pp_buf_addr = (void *)buf_addr_info; } + +void +ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc, + struct hal_sw_mon_ring_entries *sw_mon_entries) +{ + struct hal_sw_monitor_ring *sw_mon_ring = rx_desc; + struct ath11k_buffer_addr *buf_addr_info; + struct ath11k_buffer_addr *status_buf_addr_info; + struct rx_mpdu_desc *rx_mpdu_desc_info_details; + + rx_mpdu_desc_info_details = &sw_mon_ring->rx_mpdu_info; + + sw_mon_entries->msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT, + rx_mpdu_desc_info_details->info0); + + buf_addr_info = &sw_mon_ring->buf_addr_info; + status_buf_addr_info = &sw_mon_ring->status_buf_addr_info; + + sw_mon_entries->mon_dst_paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, + buf_addr_info->info1)) << 32) | + FIELD_GET(BUFFER_ADDR_INFO0_ADDR, + buf_addr_info->info0); + + sw_mon_entries->mon_status_paddr = + (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, + status_buf_addr_info->info1)) << 32) | + FIELD_GET(BUFFER_ADDR_INFO0_ADDR, + status_buf_addr_info->info0); + + sw_mon_entries->mon_dst_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, + buf_addr_info->info1); + + sw_mon_entries->mon_status_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, + status_buf_addr_info->info1); + + sw_mon_entries->status_buf_count = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT, + sw_mon_ring->info0); + + sw_mon_entries->dst_buf_addr_info = buf_addr_info; + sw_mon_entries->status_buf_addr_info = status_buf_addr_info; + + sw_mon_entries->ppdu_id = + FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, sw_mon_ring->info1); +} diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h index 0f1f04b812b9..571054c6d7f8 100644 --- 
a/drivers/net/wireless/ath/ath11k/hal_rx.h +++ b/drivers/net/wireless/ath/ath11k/hal_rx.h @@ -77,6 +77,20 @@ enum hal_rx_mon_status { HAL_RX_MON_STATUS_BUF_DONE, }; +struct hal_sw_mon_ring_entries { + dma_addr_t mon_dst_paddr; + dma_addr_t mon_status_paddr; + u32 mon_dst_sw_cookie; + u32 mon_status_sw_cookie; + void *dst_buf_addr_info; + void *status_buf_addr_info; + u16 ppdu_id; + u8 status_buf_count; + u8 msdu_cnt; + bool end_of_ppdu; + bool drop_ppdu; +}; + struct hal_rx_mon_ppdu_info { u32 ppdu_id; u32 ppdu_ts; @@ -98,6 +112,7 @@ struct hal_rx_mon_ppdu_info { u8 ldpc; u8 beamformed; u8 rssi_comb; + u8 rssi_chain_pri20[HAL_RX_MAX_NSS]; u8 tid; u8 dcm; u8 ru_alloc; @@ -248,8 +263,17 @@ struct hal_rx_he_sig_b2_ofdma_info { #define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8) +#define HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20 GENMASK(7, 0) + +struct hal_rx_phyrx_chain_rssi { + __le32 rssi_2040; + __le32 rssi_80; +} __packed; + struct hal_rx_phyrx_rssi_legacy_info { - __le32 rsvd[35]; + __le32 rsvd[3]; + struct hal_rx_phyrx_chain_rssi pre_rssi[HAL_RX_MAX_NSS]; + struct hal_rx_phyrx_chain_rssi preamble[HAL_RX_MAX_NSS]; __le32 info0; } __packed; @@ -331,38 +355,14 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie, void **pp_buf_addr_info, u8 *rbm, u32 *msdu_cnt); +void +ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc, + struct hal_sw_mon_ring_entries *sw_mon_ent); enum hal_rx_mon_status ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab, struct hal_rx_mon_ppdu_info *ppdu_info, struct sk_buff *skb); -static inline u32 ath11k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) -{ - u32 ret = 0; - - switch (ru_tones) { - case RU_26: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; - break; - case RU_52: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; - break; - case RU_106: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; - break; - case RU_242: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; - break; - case RU_484: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; - break; - case RU_996: - ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; - break; - } - return ret; -} - #define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF #define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF #define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c index 54b1d34724d7..6913b7494b9b 100644 --- a/drivers/net/wireless/ath/ath11k/htc.c +++ b/drivers/net/wireless/ath/ath11k/htc.c @@ -81,6 +81,8 @@ int ath11k_htc_send(struct ath11k_htc *htc, struct ath11k_base *ab = htc->ab; int credits = 0; int ret; + bool credit_flow_enabled = (ab->hw_params.credit_flow && + ep->tx_credit_flow_enabled); if (eid >= ATH11K_HTC_EP_COUNT) { ath11k_warn(ab, "Invalid endpoint id: %d\n", eid); @@ -89,7 +91,7 @@ int ath11k_htc_send(struct ath11k_htc *htc, skb_push(skb, sizeof(struct ath11k_htc_hdr)); - if (ep->tx_credit_flow_enabled) { + if (credit_flow_enabled) { credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); spin_lock_bh(&htc->tx_lock); if (ep->tx_credits < credits) { @@ -126,7 +128,7 @@ int ath11k_htc_send(struct ath11k_htc *htc, err_unmap: dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); err_credits: - if (ep->tx_credit_flow_enabled) { + if (credit_flow_enabled) { spin_lock_bh(&htc->tx_lock); ep->tx_credits += credits; ath11k_dbg(ab, ATH11K_DBG_HTC, @@ -203,23 +205,25 @@ static int ath11k_htc_process_trailer(struct ath11k_htc *htc, break; } - switch (record->hdr.id) { - case ATH11K_HTC_RECORD_CREDITS: - len = 
sizeof(struct ath11k_htc_credit_report); - if (record->hdr.len < len) { - ath11k_warn(ab, "Credit report too long\n"); - status = -EINVAL; + if (ab->hw_params.credit_flow) { + switch (record->hdr.id) { + case ATH11K_HTC_RECORD_CREDITS: + len = sizeof(struct ath11k_htc_credit_report); + if (record->hdr.len < len) { + ath11k_warn(ab, "Credit report too long\n"); + status = -EINVAL; + break; + } + ath11k_htc_process_credit_report(htc, + record->credit_report, + record->hdr.len, + src_eid); + break; + default: + ath11k_warn(ab, "Unhandled record: id:%d length:%d\n", + record->hdr.id, record->hdr.len); break; } - ath11k_htc_process_credit_report(htc, - record->credit_report, - record->hdr.len, - src_eid); - break; - default: - ath11k_warn(ab, "Unhandled record: id:%d length:%d\n", - record->hdr.id, record->hdr.len); - break; } if (status) @@ -245,6 +249,29 @@ static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack) complete(&ab->htc_suspend); } +void ath11k_htc_tx_completion_handler(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct ath11k_htc *htc = &ab->htc; + struct ath11k_htc_ep *ep; + void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *); + u8 eid; + + eid = ATH11K_SKB_CB(skb)->eid; + if (eid >= ATH11K_HTC_EP_COUNT) + return; + + ep = &htc->endpoint[eid]; + spin_lock_bh(&htc->tx_lock); + ep_tx_complete = ep->ep_ops.ep_tx_complete; + spin_unlock_bh(&htc->tx_lock); + if (!ep_tx_complete) { + dev_kfree_skb_any(skb); + return; + } + ep_tx_complete(htc->ab, skb); +} + void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, struct sk_buff *skb) { @@ -607,6 +634,11 @@ int ath11k_htc_connect_service(struct ath11k_htc *htc, disable_credit_flow_ctrl = true; } + if (!ab->hw_params.credit_flow) { + flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; + disable_credit_flow_ctrl = true; + } + req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags); req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID, conn_req->service_id); @@ -732,7 +764,10 @@ int ath11k_htc_start(struct ath11k_htc *htc) msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID, ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID); - ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n"); + if (ab->hw_params.credit_flow) + ath11k_dbg(ab, ATH11K_DBG_HTC, "HTC is using TX credit flow control\n"); + else + msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW; status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb); if (status) { diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h index 6c8a469d7f9d..f429b37cfdf7 100644 --- a/drivers/net/wireless/ath/ath11k/htc.h +++ b/drivers/net/wireless/ath/ath11k/htc.h @@ -83,8 +83,8 @@ enum ath11k_htc_conn_flags { ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1, ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2, ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3, - ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2, - ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3 + ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 0x4, + ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 0x8, }; enum ath11k_htc_conn_svc_status { @@ -116,6 +116,8 @@ struct ath11k_htc_conn_svc_resp { u32 svc_meta_pad; } __packed; +#define ATH11K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1) + struct ath11k_htc_setup_complete_extended { u32 msg_id; u32 flags; @@ -305,5 +307,6 @@ int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid, struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size); void 
ath11k_htc_rx_completion_handler(struct ath11k_base *ar, struct sk_buff *skb); - +void ath11k_htc_tx_completion_handler(struct ath11k_base *ab, + struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index da35fcf5bc56..3b0fdc1a6b3f 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -150,18 +150,18 @@ static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab) static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab, struct target_resource_config *config) { - config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS; + config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab); if (ab->num_radios == 2) { - config->num_peers = TARGET_NUM_PEERS(DBS); - config->num_tids = TARGET_NUM_TIDS(DBS); + config->num_peers = TARGET_NUM_PEERS(ab, DBS); + config->num_tids = TARGET_NUM_TIDS(ab, DBS); } else if (ab->num_radios == 3) { - config->num_peers = TARGET_NUM_PEERS(DBS_SBS); - config->num_tids = TARGET_NUM_TIDS(DBS_SBS); + config->num_peers = TARGET_NUM_PEERS(ab, DBS_SBS); + config->num_tids = TARGET_NUM_TIDS(ab, DBS_SBS); } else { /* Control should not reach here */ - config->num_peers = TARGET_NUM_PEERS(SINGLE); - config->num_tids = TARGET_NUM_TIDS(SINGLE); + config->num_peers = TARGET_NUM_PEERS(ab, SINGLE); + config->num_tids = TARGET_NUM_TIDS(ab, SINGLE); } config->num_offload_peers = TARGET_NUM_OFFLD_PEERS; config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS; @@ -1061,8 +1061,6 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = { const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = { .tx = { ATH11K_TX_RING_MASK_0, - ATH11K_TX_RING_MASK_1, - ATH11K_TX_RING_MASK_2, }, .rx_mon_status = { 0, 0, 0, 0, diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 19223d36846e..29934b36c14e 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -12,26 +12,26 @@ /* Target configuration defines */ /* Num VDEVS per radio */ -#define TARGET_NUM_VDEVS (16 + 1) +#define TARGET_NUM_VDEVS(ab) (ab->hw_params.num_vdevs) -#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS) +#define TARGET_NUM_PEERS_PDEV(ab) (ab->hw_params.num_peers + TARGET_NUM_VDEVS(ab)) /* Num of peers for Single Radio mode */ -#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV) +#define TARGET_NUM_PEERS_SINGLE(ab) (TARGET_NUM_PEERS_PDEV(ab)) /* Num of peers for DBS */ -#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV) +#define TARGET_NUM_PEERS_DBS(ab) (2 * TARGET_NUM_PEERS_PDEV(ab)) /* Num of peers for DBS_SBS */ -#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV) +#define TARGET_NUM_PEERS_DBS_SBS(ab) (3 * TARGET_NUM_PEERS_PDEV(ab)) /* Max num of stations (per radio) */ -#define TARGET_NUM_STATIONS 512 +#define TARGET_NUM_STATIONS(ab) (ab->hw_params.num_peers) -#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x +#define TARGET_NUM_PEERS(ab, x) TARGET_NUM_PEERS_##x(ab) #define TARGET_NUM_PEER_KEYS 2 -#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \ - 4 * TARGET_NUM_VDEVS + 8) +#define TARGET_NUM_TIDS(ab, x) (2 * TARGET_NUM_PEERS(ab, x) + \ + 4 * TARGET_NUM_VDEVS(ab) + 8) #define TARGET_AST_SKID_LIMIT 16 #define TARGET_NUM_OFFLD_PEERS 4 @@ -77,6 +77,7 @@ #define ATH11K_DEFAULT_CAL_FILE "caldata.bin" #define ATH11K_AMSS_FILE "amss.bin" #define ATH11K_M3_FILE "m3.bin" +#define ATH11K_REGDB_FILE_NAME "regdb.bin" enum ath11k_hw_rate_cck { ATH11K_HW_RATE_CCK_LP_11M = 0, @@ -151,6 +152,9 @@ 
struct ath11k_hw_params { u32 svc_to_ce_map_len; bool single_pdev_only; + u32 rfkill_pin; + u32 rfkill_cfg; + u32 rfkill_on_level; bool rxdma1_enable; int num_rxmda_per_pdev; @@ -168,14 +172,26 @@ struct ath11k_hw_params { u16 interface_modes; bool supports_monitor; + bool full_monitor_mode; bool supports_shadow_regs; bool idle_ps; + bool supports_sta_ps; bool cold_boot_calib; + int fw_mem_mode; + u32 num_vdevs; + u32 num_peers; bool supports_suspend; u32 hal_desc_sz; + bool supports_regdb; bool fix_l1ss; + bool credit_flow; u8 max_tx_ring; const struct ath11k_hw_hal_params *hal_params; + bool supports_dynamic_smps_6ghz; + bool alloc_cacheable_memory; + bool wakeup_mhi; + bool supports_rssi_stats; + bool fw_wmi_diag_event; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 1cc55602787b..07f499d5ec92 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <net/mac80211.h> @@ -245,6 +246,93 @@ static const u32 ath11k_smps_map[] = { static int ath11k_start_vdev_delay(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy) +{ + enum nl80211_he_ru_alloc ret; + + switch (ru_phy) { + case RU_26: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; + break; + case RU_52: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; + break; + case RU_106: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; + break; + case RU_242: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; + break; + case RU_484: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; + break; + case RU_996: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + default: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; + break; + } + + return ret; +} + +enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) +{ + enum nl80211_he_ru_alloc ret; + + switch (ru_tones) { + case 26: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; + break; + case 52: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; + break; + case 106: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; + break; + case 242: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; + break; + case 484: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; + break; + case 996: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case (996 * 2): + ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; + default: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; + break; + } + + return ret; +} + +enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi) +{ + enum nl80211_he_gi ret; + + switch (sgi) { + case RX_MSDU_START_SGI_0_8_US: + ret = NL80211_RATE_INFO_HE_GI_0_8; + break; + case RX_MSDU_START_SGI_1_6_US: + ret = NL80211_RATE_INFO_HE_GI_1_6; + break; + case RX_MSDU_START_SGI_3_2_US: + ret = NL80211_RATE_INFO_HE_GI_3_2; + break; + default: + ret = NL80211_RATE_INFO_HE_GI_0_8; + break; + } + + return ret; +} + u8 ath11k_mac_bw_to_mac80211_bw(u8 bw) { u8 ret = 0; @@ -553,6 +641,67 @@ struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id) return NULL; } +struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_pdev *pdev; + struct ath11k_vif *arvif; + int i; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + list_for_each_entry(arvif, &ar->arvifs, 
list) { + if (arvif->is_up) + return arvif; + } + } + + return NULL; +} + +static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2) +{ + return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) || + (((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) && + (band2 & WMI_HOST_WLAN_5G_CAP))); +} + +u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + struct ath11k_base *ab = ar->ab; + struct ieee80211_vif *vif = arvif->vif; + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 pdev_id = ab->target_pdev_ids[0].pdev_id; + int i; + + if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) + return pdev_id; + + band = def.chan->band; + + for (i = 0; i < ab->target_pdev_count; i++) { + if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands)) + return ab->target_pdev_ids[i].pdev_id; + } + + return pdev_id; +} + +u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + + arvif = ath11k_mac_get_vif_up(ar->ab); + + if (arvif) + return ath11k_mac_get_target_pdev_id_from_vif(arvif); + else + return ar->ab->target_pdev_ids[0].pdev_id; +} + static void ath11k_pdev_caps_update(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; @@ -775,9 +924,9 @@ static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id, arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); arg.channel.min_power = 0; - arg.channel.max_power = channel->max_power * 2; - arg.channel.max_reg_power = channel->max_reg_power * 2; - arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; + arg.channel.max_power = channel->max_power; + arg.channel.max_reg_power = channel->max_reg_power; + arg.channel.max_antenna_gain = channel->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; @@ -1049,6 +1198,83 @@ static int ath11k_mac_monitor_stop(struct ath11k *ar) return 0; } +static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_conf *conf = &ar->hw->conf; + enum wmi_sta_powersave_param param; + enum wmi_sta_ps_mode psmode; + int ret; + int timeout; + bool enable_ps; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (arvif->vif->type != NL80211_IFTYPE_STATION) + return 0; + + enable_ps = arvif->ps; + + if (!arvif->is_started) { + /* mac80211 can update vif powersave state while disconnected. + * Firmware doesn't behave nicely and consumes more power than + * necessary if PS is disabled on a non-started vdev. Hence + * force-enable PS for non-running vdevs. + */ + psmode = WMI_STA_PS_MODE_ENABLED; + } else if (enable_ps) { + psmode = WMI_STA_PS_MODE_ENABLED; + param = WMI_STA_PS_PARAM_INACTIVITY_TIME; + + timeout = conf->dynamic_ps_timeout; + if (timeout == 0) { + /* firmware doesn't like 0 */ + timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000; + } + + ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, + timeout); + if (ret) { + ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", + arvif->vdev_id, ret); + return ret; + } + } else { + psmode = WMI_STA_PS_MODE_DISABLED; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d psmode %s\n", + arvif->vdev_id, psmode ? 
"enable" : "disable"); + + ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); + if (ret) { + ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", + psmode, arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +static int ath11k_mac_config_ps(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath11k_mac_vif_setup_ps(arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret); + break; + } + } + + return ret; +} + static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed) { struct ath11k *ar = hw->priv; @@ -1137,11 +1363,15 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) arvif->rsnie_present = true; + else + arvif->rsnie_present = false; if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies, (skb_tail_pointer(bcn) - ies))) arvif->wpaie_present = true; + else + arvif->wpaie_present = false; ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); @@ -1154,6 +1384,26 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) return ret; } +void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif) +{ + struct ieee80211_vif *vif = arvif->vif; + + if (!vif->color_change_active && !arvif->bcca_zero_sent) + return; + + if (vif->color_change_active && ieee80211_beacon_cntdwn_is_complete(vif)) { + arvif->bcca_zero_sent = true; + ieee80211_color_change_finish(vif); + return; + } + + arvif->bcca_zero_sent = false; + + if (vif->color_change_active) + ieee80211_beacon_update_cntdwn(vif); + ath11k_mac_setup_bcn_tmpl(arvif); +} + static void ath11k_control_beaconing(struct ath11k_vif *arvif, struct ieee80211_bss_conf *info) { @@ -1819,7 +2069,6 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, struct ath11k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; - u8 ampdu_factor; enum nl80211_band band; u16 *he_mcs_mask; u8 max_nss, he_mcs; @@ -1827,6 +2076,9 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, int i, he_nss, nss_idx; bool user_rate_valid = true; u32 rx_nss, tx_nss, nss_160; + u8 ampdu_factor, rx_mcs_80, rx_mcs_160; + u16 mcs_160_map, mcs_80_map; + bool support_160; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; @@ -1841,6 +2093,39 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, return; arg->he_flag = true; + support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G); + + /* Supported HE-MCS and NSS Set of peer he_cap is intersection with self he_cp */ + mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); + mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + + if (support_160) { + for (i = 7; i >= 0; i--) { + u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3; + + if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + rx_mcs_160 = i + 1; + break; + } + } + } + + for (i = 7; i >= 0; i--) { + u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3; + + if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { + rx_mcs_80 = i + 1; + break; + } + } + + if (support_160) + max_nss = min(rx_mcs_80, rx_mcs_160); + else + max_nss = rx_mcs_80; + + arg->peer_nss = min(sta->rx_nss, max_nss); memcpy_and_pad(&arg->peer_he_cap_macinfo, sizeof(arg->peer_he_cap_macinfo), @@ -2343,8 +2628,12 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar, struct 
peer_assoc_params *arg, bool reassoc) { + struct ath11k_sta *arsta; + lockdep_assert_held(&ar->conf_mutex); + arsta = (struct ath11k_sta *)sta->drv_priv; + memset(arg, 0, sizeof(*arg)); reinit_completion(&ar->peer_assoc_done); @@ -2361,6 +2650,8 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar, ath11k_peer_assoc_h_qos(ar, vif, sta, arg); ath11k_peer_assoc_h_smps(sta, arg); + arsta->peer_nss = arg->peer_nss; + /* TODO: amsdu_disable req? */ } @@ -2397,6 +2688,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, struct ath11k_vif *arvif = (void *)vif->drv_priv; struct peer_assoc_params peer_arg; struct ieee80211_sta *ap_sta; + struct ath11k_peer *peer; + bool is_auth = false; int ret; lockdep_assert_held(&ar->conf_mutex); @@ -2418,6 +2711,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, rcu_read_unlock(); + peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", @@ -2458,19 +2752,37 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); - /* Authorize BSS Peer */ - ret = ath11k_wmi_set_peer_param(ar, arvif->bssid, - arvif->vdev_id, - WMI_PEER_AUTHORIZE, - 1); - if (ret) - ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid); + if (peer && peer->is_authorized) + is_auth = true; + + spin_unlock_bh(&ar->ab->base_lock); + + if (is_auth) { + ret = ath11k_wmi_set_peer_param(ar, arvif->bssid, + arvif->vdev_id, + WMI_PEER_AUTHORIZE, + 1); + if (ret) + ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); + } ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, &bss_conf->he_obss_pd); if (ret) ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n", arvif->vdev_id, ret); + + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + WMI_VDEV_PARAM_DTIM_POLICY, + WMI_DTIM_POLICY_STICK); + if (ret) + ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n", + arvif->vdev_id, ret); + + ath11k_mac_11d_scan_stop_all(ar->ab); } static void ath11k_bss_disassoc(struct ieee80211_hw *hw, @@ -2805,10 +3117,17 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, "Set staggered beacon mode for VDEV: %d\n", arvif->vdev_id); - ret = ath11k_mac_setup_bcn_tmpl(arvif); - if (ret) - ath11k_warn(ar->ab, "failed to update bcn template: %d\n", - ret); + if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) { + ret = ath11k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath11k_warn(ar->ab, "failed to update bcn template: %d\n", + ret); + } + + if (arvif->bcca_zero_sent) + arvif->do_not_send_tmpl = true; + else + arvif->do_not_send_tmpl = false; } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { @@ -2942,6 +3261,16 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, ath11k_mac_txpower_recalc(ar); } + if (changed & BSS_CHANGED_PS && + ar->ab->hw_params.supports_sta_ps) { + arvif->ps = vif->bss_conf.ps; + + ret = ath11k_mac_config_ps(ar); + if (ret) + ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n", + arvif->vdev_id, ret); + } + if (changed & BSS_CHANGED_MCAST_RATE && !ath11k_mac_vif_chan(arvif->vif, &def)) { band = def.chan->band; @@ -3009,6 +3338,25 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", 
arvif->vdev_id, ret); + + param_id = WMI_VDEV_PARAM_BSS_COLOR; + if (info->he_bss_color.enabled) + param_value = info->he_bss_color.color << + IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET; + else + param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED; + + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, + param_value); + if (ret) + ath11k_warn(ar->ab, + "failed to set bss color param on vdev %i: %d\n", + arvif->vdev_id, ret); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "bss color param 0x%x set on vdev %i\n", + param_value, arvif->vdev_id); } else if (vif->type == NL80211_IFTYPE_STATION) { ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar, arvif->vdev_id, @@ -3164,6 +3512,7 @@ static int ath11k_start_scan(struct ath11k *ar, struct scan_req_params *arg) { int ret; + unsigned long timeout = 1 * HZ; lockdep_assert_held(&ar->conf_mutex); @@ -3174,7 +3523,14 @@ static int ath11k_start_scan(struct ath11k *ar, if (ret) return ret; - ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); + if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) { + timeout = 5 * HZ; + + if (ar->supports_6ghz) + timeout += 5 * HZ; + } + + ret = wait_for_completion_timeout(&ar->scan.started, timeout); if (ret == 0) { ret = ath11k_scan_stop(ar); if (ret) @@ -3231,15 +3587,38 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, if (ret) goto exit; + /* pending_11d is set true only once, by ath11k_mac_11d_scan_start() + * when the interface comes up (it is called from + * ath11k_mac_op_add_interface()); after that it stays false. + * Without the wait below, a scan issued right after interface-up + * races with the 11d scan still running in firmware, so that scan + * always fails and the connect attempt fails with it.
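+ *
+ * Sequence, for illustration (all steps are in this file):
+ *   ath11k_mac_op_add_interface()
+ *     -> ath11k_mac_11d_scan_start(ar, vdev_id, true), which sets
+ *        pending_11d = true once the 11d scan is handed to firmware
+ *   ath11k_mac_op_hw_scan()
+ *     -> waits on ar->finish_11d_ch_list below, then starts the
+ *        regular scan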
+ */ + if (ar->pending_11d) { + long time_left; + unsigned long timeout = 5 * HZ; + + if (ar->supports_6ghz) + timeout += 5 * HZ; + + time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout); + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac wait 11d channel list time left %ld\n", time_left); + } + memset(&arg, 0, sizeof(arg)); ath11k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; arg.scan_id = ATH11K_SCAN_ID; if (req->ie_len) { + arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); + if (!arg.extraie.ptr) { + ret = -ENOMEM; + goto exit; + } arg.extraie.len = req->ie_len; - arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL); - memcpy(arg.extraie.ptr, req->ie, req->ie_len); } if (req->n_ssids) { @@ -3255,10 +3634,24 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, if (req->n_channels) { arg.num_chan = req->n_channels; + arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), + GFP_KERNEL); + + if (!arg.chan_list) { + ret = -ENOMEM; + goto exit; + } + for (i = 0; i < arg.num_chan; i++) arg.chan_list[i] = req->channels[i]->center_freq; } + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + arg.scan_f_add_spoofed_mac_in_probe = 1; + ether_addr_copy(arg.mac_addr.addr, req->mac_addr); + ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); + } + ret = ath11k_start_scan(ar, &arg); if (ret) { ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret); @@ -3273,6 +3666,8 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, ATH11K_MAC_SCAN_TIMEOUT_MSECS)); exit: + kfree(arg.chan_list); + if (req->ie_len) kfree(arg.extraie.ptr); @@ -3316,9 +3711,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif, return 0; if (cmd == DISABLE_KEY) { - /* TODO: Check if FW expects value other than NONE for del */ - /* arg.key_cipher = WMI_CIPHER_NONE; */ - arg.key_len = 0; + arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; goto install; } @@ -3450,7 +3843,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* flush the fragments cache during key (re)install to * ensure all frags in the new frag list belong to the same key. */ - if (peer && cmd == SET_KEY) + if (peer && sta && cmd == SET_KEY) ath11k_peer_frags_flush(ar, peer); spin_unlock_bh(&ab->base_lock); @@ -3685,6 +4078,7 @@ static int ath11k_station_assoc(struct ath11k *ar, ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", @@ -3824,11 +4218,27 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) ath11k_mac_max_he_nss(he_mcs_mask))); if (changed & IEEE80211_RC_BW_CHANGED) { - err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, - WMI_PEER_CHWIDTH, bw); - if (err) - ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", - sta->addr, bw, err); + /* Send the peer assoc command before setting the peer bandwidth + * param to avoid a mismatch between the peer phymode and the peer + * bandwidth.
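+ *
+ * In sketch form, the ordering implemented below is:
+ *
+ *	ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ *	wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ);
+ *	ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ *				  WMI_PEER_CHWIDTH, bw);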
+ */ + ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true); + + peer_arg.is_assoc = false; + err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (err) { + ath11k_warn(ar->ab, "failed to send peer assoc for STA %pM vdev %i: %d\n", + sta->addr, arvif->vdev_id, err); + } else if (wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { + err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + WMI_PEER_CHWIDTH, bw); + if (err) + ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", + sta->addr, bw, err); + } else { + ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", + sta->addr, arvif->vdev_id); + } } if (changed & IEEE80211_RC_NSS_CHANGED) { @@ -3896,6 +4306,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true); + peer_arg.is_assoc = false; err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (err) ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", @@ -4095,6 +4506,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, new_state == IEEE80211_STA_NOTEXIST)) { ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); + if (ar->ab->hw_params.vdev_start_delay && + vif->type == NL80211_IFTYPE_STATION) + goto free; + ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n", @@ -4116,6 +4531,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, } spin_unlock_bh(&ar->ab->base_lock); +free: kfree(arsta->tx_stats); arsta->tx_stats = NULL; @@ -4131,6 +4547,34 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ath11k_warn(ar->ab, "Failed to associate station: %pM\n", sta->addr); } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTHORIZED) { + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); + if (peer) + peer->is_authorized = true; + + spin_unlock_bh(&ar->ab->base_lock); + + if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { + ret = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + WMI_PEER_AUTHORIZE, + 1); + if (ret) + ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", + sta->addr, arvif->vdev_id, ret); + } + } else if (old_state == IEEE80211_STA_AUTHORIZED && + new_state == IEEE80211_STA_ASSOC) { + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); + if (peer) + peer->is_authorized = false; + + spin_unlock_bh(&ar->ab->base_lock); + } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || @@ -4561,6 +5005,10 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask, vht_cap.vht_supported = 1; vht_cap.cap = ar->pdev->cap.vht_cap; + if (ar->pdev->cap.nss_ratio_enabled) + vht_cap.vht_mcs.tx_highest |= + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); + ath11k_set_vht_txbf_cap(ar, &vht_cap.cap); rxmcs_map = 0; @@ -4926,23 +5374,47 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) return 0; } -int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +static void ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb) { - struct sk_buff *msdu = skb; + int num_mgmt; + + ieee80211_free_txskb(ar->hw, skb); + + num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); + + if (num_mgmt < 0) + WARN_ON_ONCE(1); + + if 
(!num_mgmt) + wake_up(&ar->txmgmt_empty_waitq); +} + +static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id) +{ + struct sk_buff *msdu; struct ieee80211_tx_info *info; - struct ath11k *ar = ctx; - struct ath11k_base *ab = ar->ab; spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); + msdu = idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, + + if (!msdu) + return; + + dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); - ieee80211_free_txskb(ar->hw, msdu); + ath11k_mgmt_over_wmi_tx_drop(ar, msdu); +} + +int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +{ + struct ath11k *ar = ctx; + + ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } @@ -4951,17 +5423,10 @@ static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb); - struct sk_buff *msdu = skb; struct ath11k *ar = skb_cb->ar; - struct ath11k_base *ab = ar->ab; - if (skb_cb->vif == vif) { - spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); - spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, - DMA_TO_DEVICE); - } + if (skb_cb->vif == vif) + ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } @@ -4976,10 +5441,16 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, int buf_id; int ret; + ATH11K_SKB_CB(skb)->ar = ar; + spin_lock_bh(&ar->txmgmt_idr_lock); buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); spin_unlock_bh(&ar->txmgmt_idr_lock); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac tx mgmt frame, buf id %d\n", buf_id); + if (buf_id < 0) return -ENOSPC; @@ -5026,7 +5497,7 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar) struct sk_buff *skb; while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) - ieee80211_free_txskb(ar->hw, skb); + ath11k_mgmt_over_wmi_tx_drop(ar, skb); } static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) @@ -5041,7 +5512,7 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) skb_cb = ATH11K_SKB_CB(skb); if (!skb_cb->vif) { ath11k_warn(ar->ab, "no vif found for mgmt frame\n"); - ieee80211_free_txskb(ar->hw, skb); + ath11k_mgmt_over_wmi_tx_drop(ar, skb); continue; } @@ -5052,16 +5523,18 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) if (ret) { ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", arvif->vdev_id, ret); - ieee80211_free_txskb(ar->hw, skb); + ath11k_mgmt_over_wmi_tx_drop(ar, skb); } else { - atomic_inc(&ar->num_pending_mgmt_tx); + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac tx mgmt frame, vdev_id %d\n", + arvif->vdev_id); } } else { ath11k_warn(ar->ab, "dropping mgmt frame for vdev %d, is_started %d\n", arvif->vdev_id, arvif->is_started); - ieee80211_free_txskb(ar->hw, skb); + ath11k_mgmt_over_wmi_tx_drop(ar, skb); } } } @@ -5092,11 +5565,69 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb, } skb_queue_tail(q, skb); + atomic_inc(&ar->num_pending_mgmt_tx); ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); return 0; } +int ath11k_mac_rfkill_config(struct ath11k *ar) +{ + struct ath11k_base *ab = ar->ab; + u32 param; + int ret; + + if (ab->hw_params.rfkill_pin == 0) + return -EOPNOTSUPP; + + 
ath11k_dbg(ab, ATH11K_DBG_MAC, + "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d", + ab->hw_params.rfkill_pin, ab->hw_params.rfkill_cfg, + ab->hw_params.rfkill_on_level); + + param = FIELD_PREP(WMI_RFKILL_CFG_RADIO_LEVEL, + ab->hw_params.rfkill_on_level) | + FIELD_PREP(WMI_RFKILL_CFG_GPIO_PIN_NUM, + ab->hw_params.rfkill_pin) | + FIELD_PREP(WMI_RFKILL_CFG_PIN_AS_GPIO, + ab->hw_params.rfkill_cfg); + + ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG, + param, ar->pdev->pdev_id); + if (ret) { + ath11k_warn(ab, + "failed to set rfkill config 0x%x: %d\n", + param, ret); + return ret; + } + + return 0; +} + +int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable) +{ + enum wmi_rfkill_enable_radio param; + int ret; + + if (enable) + param = WMI_RFKILL_ENABLE_RADIO_ON; + else + param = WMI_RFKILL_ENABLE_RADIO_OFF; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac %d rfkill enable %d", + ar->pdev_idx, param); + + ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE, + param, ar->pdev->pdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n", + param, ret); + return ret; + } + + return 0; +} + static void ath11k_mac_op_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -5138,7 +5669,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, arsta = (struct ath11k_sta *)control->sta->drv_priv; ret = ath11k_dp_tx(ar, arvif, arsta, skb); - if (ret) { + if (unlikely(ret)) { ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } @@ -5222,6 +5753,14 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw) goto err; } + if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) { + ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr); + if (ret) { + ath11k_err(ab, "failed to set prob req oui: %i\n", ret); + goto err; + } + } + ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0, pdev->pdev_id); if (ret) { @@ -5320,6 +5859,8 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw) cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->regd_update_work); + cancel_work_sync(&ar->ab->update_11d_work); + cancel_work_sync(&ar->ab->rfkill_work); spin_lock_bh(&ar->data_lock); list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { @@ -5473,6 +6014,122 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw, } } +static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_pdev *pdev; + struct ath11k_vif *arvif; + int i; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP) + return true; + } + } + return false; +} + +void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait) +{ + struct wmi_11d_scan_start_params param; + int ret; + + mutex_lock(&ar->ab->vdev_id_11d_lock); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev id for 11d scan %d\n", + ar->vdev_id_11d_scan); + + if (ar->regdom_set_by_user) + goto fin; + + if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) + goto fin; + + if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) + goto fin; + + if (ath11k_mac_vif_ap_active_any(ar->ab)) + goto fin; + + param.vdev_id = vdev_id; + param.start_interval_msec = 0; + param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d 
scan\n"); + + if (wait) + reinit_completion(&ar->finish_11d_scan); + + ret = ath11k_wmi_send_11d_scan_start_cmd(ar, ¶m); + if (ret) { + ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n", + vdev_id, ret); + } else { + ar->vdev_id_11d_scan = vdev_id; + if (wait) { + ar->pending_11d = true; + ret = wait_for_completion_timeout(&ar->finish_11d_scan, + 5 * HZ); + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac 11d scan left time %d\n", ret); + + if (!ret) + ar->pending_11d = false; + } + } + +fin: + mutex_unlock(&ar->ab->vdev_id_11d_lock); +} + +void ath11k_mac_11d_scan_stop(struct ath11k *ar) +{ + int ret; + u32 vdev_id; + + if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) + return; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d scan\n"); + + mutex_lock(&ar->ab->vdev_id_11d_lock); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d vdev id %d\n", + ar->vdev_id_11d_scan); + + if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) { + vdev_id = ar->vdev_id_11d_scan; + + ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id); + if (ret) + ath11k_warn(ar->ab, + "failed to stopt 11d scan vdev %d ret: %d\n", + vdev_id, ret); + else + ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; + } + mutex_unlock(&ar->ab->vdev_id_11d_lock); +} + +void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_pdev *pdev; + int i; + + ath11k_dbg(ab, ATH11K_DBG_MAC, "mac stop soc 11d scan\n"); + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + + ath11k_mac_11d_scan_stop(ar); + } +} + static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -5484,7 +6141,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, u32 param_id, param_value; u16 nss; int i; - int ret; + int ret, fbret; int bit; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; @@ -5498,9 +6155,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, goto err; } - if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { + if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) { ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n", - ar->num_created_vdevs, TARGET_NUM_VDEVS); + ar->num_created_vdevs, TARGET_NUM_VDEVS(ab)); ret = -EBUSY; goto err; } @@ -5606,6 +6263,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, arvif->vdev_id, ret); goto err_peer_del; } + + ath11k_mac_11d_scan_stop_all(ar->ab); break; case WMI_VDEV_TYPE_STA: param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY; @@ -5638,12 +6297,16 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, goto err_peer_del; } - ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false); + ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, + WMI_STA_PS_MODE_DISABLED); if (ret) { ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n", arvif->vdev_id, ret); goto err_peer_del; } + + ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true); + break; case WMI_VDEV_TYPE_MONITOR: set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); @@ -5686,17 +6349,17 @@ err_peer_del: if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { reinit_completion(&ar->peer_delete_done); - ret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, - arvif->vdev_id); - if (ret) { + fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, + arvif->vdev_id); + if (fbret) { ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", arvif->vdev_id, vif->addr); goto err; } - ret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id, - 
vif->addr); - if (ret) + fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id, + vif->addr); + if (fbret) goto err; ar->num_peers--; @@ -5745,6 +6408,9 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n", arvif->vdev_id); + if (arvif->vdev_type == WMI_VDEV_TYPE_STA) + ath11k_mac_11d_scan_stop(ar); + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); if (ret) @@ -5831,7 +6497,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; @@ -5969,9 +6634,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, ath11k_phymodes[chandef->chan->band][chandef->width]; arg.channel.min_power = 0; - arg.channel.max_power = chandef->chan->max_power * 2; - arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; - arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; + arg.channel.max_power = chandef->chan->max_power; + arg.channel.max_reg_power = chandef->chan->max_reg_power; + arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; @@ -6159,37 +6824,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, lockdep_assert_held(&ar->conf_mutex); - for (i = 0; i < n_vifs; i++) { - arvif = (void *)vifs[i].vif->drv_priv; - - if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) - monitor_vif = true; - - ath11k_dbg(ab, ATH11K_DBG_MAC, - "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n", - arvif->vdev_id, - vifs[i].old_ctx->def.chan->center_freq, - vifs[i].new_ctx->def.chan->center_freq, - vifs[i].old_ctx->def.width, - vifs[i].new_ctx->def.width); - - if (WARN_ON(!arvif->is_started)) - continue; - - if (WARN_ON(!arvif->is_up)) - continue; - - ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); - if (ret) { - ath11k_warn(ab, "failed to down vdev %d: %d\n", - arvif->vdev_id, ret); - continue; - } - - ar->num_started_vdevs--; - } - - /* All relevant vdevs are downed and associated channel resources + /* Associated channel resources of all relevant vdevs * should be available for the channel switch now. 
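 *
 * A rough sketch of the per-vif switch that follows (helper names and
 * argument lists as used earlier in this file; the restart path is
 * assumed to absorb the intermediate vdev state transitions):
 *
 *	ath11k_mac_vdev_start_restart(arvif, &vifs[i].new_ctx->def, true);
 *	ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);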
*/ @@ -6468,6 +7103,19 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_started = false; if (ab->hw_params.vdev_start_delay && + arvif->vdev_type == WMI_VDEV_TYPE_STA) { + ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid); + if (ret) + ath11k_warn(ar->ab, + "failed to delete peer %pM for vdev %d: %d\n", + arvif->bssid, arvif->vdev_id, ret); + else + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac removed peer %pM vdev %d after vdev stop\n", + arvif->bssid, arvif->vdev_id); + } + + if (ab->hw_params.vdev_start_delay && arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) ath11k_wmi_vdev_down(ar, arvif->vdev_id); @@ -6481,6 +7129,9 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, ret); } + if (arvif->vdev_type == WMI_VDEV_TYPE_STA) + ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false); + mutex_unlock(&ar->conf_mutex); } @@ -6567,6 +7218,17 @@ static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v ATH11K_FLUSH_TIMEOUT); if (time_left == 0) ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left); + + time_left = wait_event_timeout(ar->txmgmt_empty_waitq, + (atomic_read(&ar->num_pending_mgmt_tx) == 0), + ATH11K_FLUSH_TIMEOUT); + if (time_left == 0) + ath11k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n", + time_left); + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac mgmt tx flush mgmt pending %d\n", + atomic_read(&ar->num_pending_mgmt_tx)); } static int @@ -7264,12 +7926,45 @@ exit: return ret; } +static void ath11k_mac_put_chain_rssi(struct station_info *sinfo, + struct ath11k_sta *arsta, + char *pre, + bool clear) +{ + struct ath11k *ar = arsta->arvif->ar; + int i; + s8 rssi; + + for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { + sinfo->chains &= ~BIT(i); + rssi = arsta->chain_signal[i]; + if (clear) + arsta->chain_signal[i] = ATH11K_INVALID_RSSI_FULL; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac sta statistics %s rssi[%d] %d\n", pre, i, rssi); + + if (rssi != ATH11K_DEFAULT_NOISE_FLOOR && + rssi != ATH11K_INVALID_RSSI_FULL && + rssi != ATH11K_INVALID_RSSI_EMPTY && + rssi != 0) { + sinfo->chain_signal[i] = rssi; + sinfo->chains |= BIT(i); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); + } + } +} + static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k *ar = arsta->arvif->ar; + s8 signal; + bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ar->ab->wmi_ab.svc_map); sinfo->rx_duration = arsta->rx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); @@ -7277,25 +7972,47 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, sinfo->tx_duration = arsta->tx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); - if (!arsta->txrate.legacy && !arsta->txrate.nss) - return; + if (arsta->txrate.legacy || arsta->txrate.nss) { + if (arsta->txrate.legacy) { + sinfo->txrate.legacy = arsta->txrate.legacy; + } else { + sinfo->txrate.mcs = arsta->txrate.mcs; + sinfo->txrate.nss = arsta->txrate.nss; + sinfo->txrate.bw = arsta->txrate.bw; + sinfo->txrate.he_gi = arsta->txrate.he_gi; + sinfo->txrate.he_dcm = arsta->txrate.he_dcm; + sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; + } + sinfo->txrate.flags = arsta->txrate.flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } - if (arsta->txrate.legacy) { - sinfo->txrate.legacy = arsta->txrate.legacy; - } else { - 
sinfo->txrate.mcs = arsta->txrate.mcs; - sinfo->txrate.nss = arsta->txrate.nss; - sinfo->txrate.bw = arsta->txrate.bw; - sinfo->txrate.he_gi = arsta->txrate.he_gi; - sinfo->txrate.he_dcm = arsta->txrate.he_dcm; - sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; + ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false); + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) && + arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && + ar->ab->hw_params.supports_rssi_stats && + !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0, + WMI_REQUEST_RSSI_PER_CHAIN_STAT)) { + ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true); } - sinfo->txrate.flags = arsta->txrate.flags; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); - /* TODO: Use real NF instead of default one. */ - sinfo->signal = arsta->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + signal = arsta->rssi_comb; + if (!signal && + arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && + ar->ab->hw_params.supports_rssi_stats && + !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0, + WMI_REQUEST_VDEV_STAT))) + signal = arsta->rssi_beacon; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "mac sta statistics db2dbm %u rssi comb %d rssi beacon %d\n", + db2dbm, arsta->rssi_comb, arsta->rssi_beacon); + + if (signal) { + sinfo->signal = db2dbm ? signal : signal + ATH11K_DEFAULT_NOISE_FLOOR; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + } } static const struct ieee80211_ops ath11k_ops = { @@ -7633,6 +8350,9 @@ static int __ath11k_mac_register(struct ath11k *ar) ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes; + if (ab->hw_params.single_pdev_only && ar->supports_6ghz) + ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS); + ieee80211_hw_set(ar->hw, SIGNAL_DBM); ieee80211_hw_set(ar->hw, SUPPORTS_PS); ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); @@ -7672,7 +8392,8 @@ static int __ath11k_mac_register(struct ath11k *ar) * for each band for a dual band capable radio. It will be tricky to * handle it when the ht capability is different for each band.
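 *
 * For example (hypothetical capability split): if the 2 GHz band
 * advertised WMI_HT_CAP_DYNAMIC_SMPS but the 5 GHz band did not, the
 * single wiphy-wide NL80211_FEATURE_DYNAMIC_SMPS flag set below could
 * not describe both bands at once.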
*/ - if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz) + if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || + (ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz)) ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; @@ -7688,11 +8409,16 @@ static int __ath11k_mac_register(struct ath11k *ar) ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_AP_SCAN; - ar->max_num_stations = TARGET_NUM_STATIONS; - ar->max_num_peers = TARGET_NUM_PEERS_PDEV; + ar->max_num_stations = TARGET_NUM_STATIONS(ab); + ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab); ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; + if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) { + ar->hw->wiphy->features |= + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + } + ar->hw->queues = ATH11K_HW_MAX_QUEUES; ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN; ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1; @@ -7703,6 +8429,9 @@ static int __ath11k_mac_register(struct ath11k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); + if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_BSS_COLOR); ar->hw->wiphy->cipher_suites = cipher_suites; ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); @@ -7784,7 +8513,7 @@ int ath11k_mac_register(struct ath11k_base *ab) /* Initialize channel counters frequency value in hertz */ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ; - ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; + ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; @@ -7802,6 +8531,8 @@ int ath11k_mac_register(struct ath11k_base *ab) ret = __ath11k_mac_register(ar); if (ret) goto err_cleanup; + + init_waitqueue_head(&ar->txmgmt_empty_waitq); } return 0; @@ -7879,6 +8610,9 @@ int ath11k_mac_allocate(struct ath11k_base *ab) ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); + ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; + init_completion(&ar->finish_11d_scan); + init_completion(&ar->finish_11d_ch_list); } return 0; diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 254ca4acc8e8..0e6c870b09c8 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -127,6 +127,13 @@ struct ath11k_generic_iter { extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default; +#define ATH11K_SCAN_11D_INTERVAL 600000 +#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF + +void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait); +void ath11k_mac_11d_scan_stop(struct ath11k *ar); +void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab); + void ath11k_mac_destroy(struct ath11k_base *ab); void ath11k_mac_unregister(struct ath11k_base *ab); int ath11k_mac_register(struct ath11k_base *ab); @@ -140,10 +147,16 @@ u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, void __ath11k_mac_scan_finish(struct ath11k *ar); void ath11k_mac_scan_finish(struct ath11k *ar); +int ath11k_mac_rfkill_enable_radio(struct ath11k *ar, bool enable); +int ath11k_mac_rfkill_config(struct ath11k *ar); struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id); struct ath11k_vif 
*ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab, u32 vdev_id); +u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar); +u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif); +struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab); + struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id); struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id); @@ -151,8 +164,12 @@ void ath11k_mac_drain_tx(struct ath11k *ar); void ath11k_mac_peer_cleanup_all(struct ath11k *ar); int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); u8 ath11k_mac_bw_to_mac80211_bw(u8 bw); +u32 ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi); +enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy); +enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones); enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw); enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher); void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); +void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif); #endif diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 26c7ae242db6..e4250ba8dfee 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -3,6 +3,9 @@ #include <linux/msi.h> #include <linux/pci.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/ioport.h> #include "core.h" #include "debug.h" @@ -248,6 +251,7 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) u32 user_base_data, base_vector; int ret, num_vectors, i; int *irq; + unsigned int msi_data; ret = ath11k_pci_get_user_msi_assignment(ab_pci, "MHI", &num_vectors, @@ -262,9 +266,15 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) if (!irq) return -ENOMEM; - for (i = 0; i < num_vectors; i++) + for (i = 0; i < num_vectors; i++) { + msi_data = base_vector; + + if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + msi_data += i; + irq[i] = ath11k_pci_get_msi_irq(ab->dev, - base_vector + i); + msi_data); + } ab_pci->mhi_ctrl->irq = irq; ab_pci->mhi_ctrl->nr_irqs = num_vectors; @@ -311,6 +321,26 @@ static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl, writel(val, addr); } +static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl) +{ + struct device_node *np; + struct resource res; + int ret; + + np = of_find_node_by_type(NULL, "memory"); + if (!np) + return -ENOENT; + + ret = of_address_to_resource(np, 0, &res); + if (ret) + return ret; + + mhi_ctrl->iova_start = res.start + 0x1000000; + mhi_ctrl->iova_stop = res.end; + + return 0; +} + int ath11k_mhi_register(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; @@ -339,8 +369,18 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) return ret; } - mhi_ctrl->iova_start = 0; - mhi_ctrl->iova_stop = 0xffffffff; + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; + + if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { + ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl); + if (ret < 0) + return ret; + } else { + mhi_ctrl->iova_start = 0; + mhi_ctrl->iova_stop = 0xFFFFFFFF; + } + mhi_ctrl->sbl_size = SZ_512K; mhi_ctrl->seg_len = SZ_512K; mhi_ctrl->fbc_download = true; @@ -356,6 +396,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) 
break; case ATH11K_HW_QCA6390_HW20: case ATH11K_HW_WCN6855_HW20: + case ATH11K_HW_WCN6855_HW21: ath11k_mhi_config = &ath11k_mhi_config_qca6390; break; default: @@ -533,7 +574,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, ret = mhi_pm_suspend(ab_pci->mhi_ctrl); break; case ATH11K_MHI_RESUME: - ret = mhi_pm_resume(ab_pci->mhi_ctrl); + /* Do force MHI resume as some devices like QCA6390, WCN6855 + * are not in M3 state but they are functional. So just ignore + * the MHI state while resuming. + */ + ret = mhi_pm_resume_force(ab_pci->mhi_ctrl); break; case ATH11K_MHI_TRIGGER_RDDM: ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 3d353e7c9d5c..d73b522a0081 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -6,6 +6,7 @@ #include <linux/module.h> #include <linux/msi.h> #include <linux/pci.h> +#include <linux/of.h> #include "pci.h" #include "core.h" @@ -16,7 +17,8 @@ #define ATH11K_PCI_BAR_NUM 0 #define ATH11K_PCI_DMA_MASK 32 -#define ATH11K_PCI_IRQ_CE0_OFFSET 3 +#define ATH11K_PCI_IRQ_CE0_OFFSET 3 +#define ATH11K_PCI_IRQ_DP_OFFSET 14 #define WINDOW_ENABLE_BIT 0x40000000 #define WINDOW_REG_ADDRESS 0x310c @@ -25,7 +27,7 @@ #define WINDOW_RANGE_MASK GENMASK(18, 0) #define TCSR_SOC_HW_VERSION 0x0224 -#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8) +#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0) /* BAR0 + 4k is always accessible, and no @@ -76,6 +78,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = { }, }; +static const struct ath11k_msi_config msi_config_one_msi = { + .total_vectors = 1, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 1, .base_vector = 0 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 0 }, + { .name = "DP", .num_vectors = 1, .base_vector = 0 }, + }, +}; + static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { "bhi", "mhi-er0", @@ -182,7 +195,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. */ - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); @@ -206,7 +220,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) } } - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); } @@ -219,7 +234,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. 
*/ - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); @@ -243,7 +259,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) } } - if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); @@ -481,11 +498,11 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam for (idx = 0; idx < msi_config->total_users; idx++) { if (strcmp(user_name, msi_config->users[idx].name) == 0) { *num_vectors = msi_config->users[idx].num_vectors; - *user_base_data = msi_config->users[idx].base_vector - + ab_pci->msi_ep_base_data; - *base_vector = msi_config->users[idx].base_vector; + *base_vector = msi_config->users[idx].base_vector; + *user_base_data = *base_vector + ab_pci->msi_ep_base_data; - ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", + ath11k_dbg(ab, ATH11K_DBG_PCI, + "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", user_name, *num_vectors, *user_base_data, *base_vector); @@ -556,16 +573,30 @@ static void ath11k_pci_free_irq(struct ath11k_base *ab) static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 irq_idx; + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; enable_irq(ab->irq_num[irq_idx]); } static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 irq_idx; + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; disable_irq_nosync(ab->irq_num[irq_idx]); } @@ -574,6 +605,8 @@ static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab) { int i; + clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; @@ -598,20 +631,27 @@ static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab) static void ath11k_pci_ce_tasklet(struct tasklet_struct *t) { struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); + int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); - ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); + enable_irq(ce_pipe->ab->irq_num[irq_idx]); } static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg) { struct ath11k_ce_pipe *ce_pipe = arg; + struct ath11k_base *ab = ce_pipe->ab; + int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; + + if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) + return IRQ_HANDLED; /* last interrupt received for this CE */ ce_pipe->timestamp = jiffies; - ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); + disable_irq_nosync(ab->irq_num[irq_idx]); + tasklet_schedule(&ce_pipe->intr_tq); return IRQ_HANDLED; @@ -619,8 +659,15 @@ static irqreturn_t 
ath11k_pci_ce_interrupt_handler(int irq, void *arg) static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); int i; + /* In case of one MSI vector, we handle irq enable/disable + * in a uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + for (i = 0; i < irq_grp->num_irq; i++) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } @@ -629,20 +676,32 @@ static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc) { int i; + clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags); + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; ath11k_pci_ext_grp_disable(irq_grp); - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } } static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); int i; + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + for (i = 0; i < irq_grp->num_irq; i++) enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } @@ -651,10 +710,15 @@ static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) { int i; + set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } ath11k_pci_ext_grp_enable(irq_grp); } } @@ -686,11 +750,13 @@ static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) napi); struct ath11k_base *ab = irq_grp->ab; int work_done; + int i; work_done = ath11k_dp_service_srng(ab, irq_grp, budget); if (work_done < budget) { napi_complete_done(napi, work_done); - ath11k_pci_ext_grp_enable(irq_grp); + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } if (work_done > budget) @@ -702,13 +768,19 @@ static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) { struct ath11k_ext_irq_grp *irq_grp = arg; + struct ath11k_base *ab = irq_grp->ab; + int i; + + if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) + return IRQ_HANDLED; ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); /* last interrupt received for this group */ irq_grp->timestamp = jiffies; - ath11k_pci_ext_grp_disable(irq_grp); + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); napi_schedule(&irq_grp->napi); @@ -717,10 +789,10 @@ static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); int i, j, ret, num_vectors = 0; - u32 user_base_data = 0, base_vector = 0, base_idx; + u32 user_base_data = 0, base_vector = 0; - base_idx = ATH11K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", &num_vectors, &user_base_data, @@ -750,7 +822,7 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) } 
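
The pattern running through these PCI hunks: with a single shared MSI vector there is no per-source line to enable_irq()/disable_irq(), so masking becomes a software flag (ATH11K_FLAG_CE_IRQ_ENABLED, ATH11K_FLAG_EXT_IRQ_ENABLED) that the shared handler checks before doing any work. A minimal sketch of that gating, with hypothetical names standing in for the driver's symbols:

    #include <linux/bitops.h>
    #include <linux/interrupt.h>

    /* Sketch only: my_dev, MY_CE_IRQ_ENABLED and my_service_ce() are
     * illustrative stand-ins, not ath11k symbols.
     */
    struct my_dev {
    	unsigned long flags;
    };

    #define MY_CE_IRQ_ENABLED 0

    static void my_service_ce(struct my_dev *dev)
    {
    	/* service copy-engine rings here */
    }

    static irqreturn_t my_shared_vector_handler(int irq, void *arg)
    {
    	struct my_dev *dev = arg;

    	/* One vector serves every source: per-source disable_irq() would
    	 * mask them all, so drop events while this source is logically off.
    	 */
    	if (!test_bit(MY_CE_IRQ_ENABLED, &dev->flags))
    		return IRQ_HANDLED;

    	my_service_ce(dev);
    	return IRQ_HANDLED;
    }
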
irq_grp->num_irq = num_irq; - irq_grp->irqs[0] = base_idx + i; + irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i; for (j = 0; j < irq_grp->num_irq; j++) { int irq_idx = irq_grp->irqs[j]; @@ -764,23 +836,32 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath11k_pci_ext_interrupt_handler, - IRQF_SHARED, + ab_pci->irq_flags, "DP_EXT_IRQ", irq_grp); if (ret) { ath11k_err(ab, "failed request irq %d: %d\n", vector, ret); return ret; } - - disable_irq_nosync(ab->irq_num[irq_idx]); } + ath11k_pci_ext_grp_disable(irq_grp); } return 0; } +static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, + const struct cpumask *m) +{ + if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return 0; + + return irq_set_affinity_hint(ab_pci->pdev->irq, m); +} + static int ath11k_pci_config_irq(struct ath11k_base *ab) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct ath11k_ce_pipe *ce_pipe; u32 msi_data_start; u32 msi_data_count, msi_data_idx; @@ -794,6 +875,12 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) if (ret) return ret; + ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); + if (ret) { + ath11k_err(ab, "failed to set irq affinity %d\n", ret); + return ret; + } + /* Configure CE irqs */ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) @@ -808,12 +895,12 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet); ret = request_irq(irq, ath11k_pci_ce_interrupt_handler, - IRQF_SHARED, irq_name[irq_idx], + ab_pci->irq_flags, irq_name[irq_idx], ce_pipe); if (ret) { ath11k_err(ab, "failed to request irq %d: %d\n", irq_idx, ret); - return ret; + goto err_irq_affinity_cleanup; } ab->irq_num[irq_idx] = irq; @@ -824,9 +911,13 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) ret = ath11k_pci_ext_irq_config(ab); if (ret) - return ret; + goto err_irq_affinity_cleanup; return 0; + +err_irq_affinity_cleanup: + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); + return ret; } static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) @@ -848,6 +939,8 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) { int i; + set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; @@ -892,15 +985,25 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) msi_config->total_vectors, msi_config->total_vectors, PCI_IRQ_MSI); - if (num_vectors != msi_config->total_vectors) { - ath11k_err(ab, "failed to get %d MSI vectors, only %d available", - msi_config->total_vectors, num_vectors); - - if (num_vectors >= 0) - return -EINVAL; - else - return num_vectors; + if (num_vectors == msi_config->total_vectors) { + set_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); + ab_pci->irq_flags = IRQF_SHARED; + } else { + num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, + 1, + 1, + PCI_IRQ_MSI); + if (num_vectors < 0) { + ret = -EINVAL; + goto reset_msi_config; + } + clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); + ab_pci->msi_config = &msi_config_one_msi; + ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; + ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n"); } + ath11k_info(ab, "MSI vectors: %d\n", num_vectors); + ath11k_pci_msi_disable(ab_pci); msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); @@ -921,6 
+1024,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) free_msi_vector: pci_free_irq_vectors(ab_pci->pdev); +reset_msi_config: return ret; } @@ -929,6 +1033,25 @@ static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci) pci_free_irq_vectors(ab_pci->pdev); } +static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci) +{ + struct msi_desc *msi_desc; + + msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); + if (!msi_desc) { + ath11k_err(ab_pci->ab, "msi_desc is NULL!\n"); + pci_free_irq_vectors(ab_pci->pdev); + return -EINVAL; + } + + ab_pci->msi_ep_base_data = msi_desc->msg.data; + + ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n", + ab_pci->msi_ep_base_data); + + return 0; +} + static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev) { struct ath11k_base *ab = ab_pci->ab; @@ -1126,7 +1249,13 @@ static int ath11k_pci_start(struct ath11k_base *ab) set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); - ath11k_pci_aspm_restore(ab_pci); + /* TODO: for now don't restore ASPM in case of single MSI + * vector as MHI register reading in M2 causes system hang. + */ + if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + ath11k_pci_aspm_restore(ab_pci); + else + ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); ath11k_pci_ce_irqs_enable(ab); ath11k_ce_rx_post_buf(ab); @@ -1225,7 +1354,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev, { struct ath11k_base *ab; struct ath11k_pci *ab_pci; - u32 soc_hw_version_major, soc_hw_version_minor; + u32 soc_hw_version_major, soc_hw_version_minor, addr; int ret; ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI, @@ -1245,12 +1374,29 @@ static int ath11k_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, ab); spin_lock_init(&ab_pci->window_lock); + /* Set fixed_mem_region to true for platforms support reserved memory + * from DT. If memory is reserved from DT for FW, ath11k driver need not + * allocate memory. 
+ */ + ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr); + if (!ret) + set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags); + ret = ath11k_pci_claim(ab_pci, pdev); if (ret) { ath11k_err(ab, "failed to claim device: %d\n", ret); goto err_free_core; } + ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", + pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + ab->id.vendor = pdev->vendor; + ab->id.device = pdev->device; + ab->id.subsystem_vendor = pdev->subsystem_vendor; + ab->id.subsystem_device = pdev->subsystem_device; + switch (pci_dev->device) { case QCA6390_DEVICE_ID: ath11k_pci_read_hw_version(ab, &soc_hw_version_major, @@ -1273,13 +1419,26 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; case WCN6855_DEVICE_ID: + ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD; ath11k_pci_read_hw_version(ab, &soc_hw_version_major, &soc_hw_version_minor); switch (soc_hw_version_major) { case 2: - ab->hw_rev = ATH11K_HW_WCN6855_HW20; + switch (soc_hw_version_minor) { + case 0x00: + case 0x01: + ab->hw_rev = ATH11K_HW_WCN6855_HW20; + break; + case 0x10: + case 0x11: + ab->hw_rev = ATH11K_HW_WCN6855_HW21; + break; + default: + goto unsupported_wcn6855_soc; + } break; default: +unsupported_wcn6855_soc: dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n", soc_hw_version_major, soc_hw_version_minor); ret = -EOPNOTSUPP; @@ -1328,6 +1487,17 @@ static int ath11k_pci_probe(struct pci_dev *pdev, goto err_ce_free; } + /* kernel may allocate a dummy vector before request_irq and + * then allocate a real vector when request_irq is called. + * So get msi_data here again to avoid spurious interrupt + * as msi_data will configured to srngs. + */ + ret = ath11k_pci_config_msi_data(ab_pci); + if (ret) { + ath11k_err(ab, "failed to config msi_data: %d\n", ret); + goto err_free_irq; + } + ret = ath11k_core_init(ab); if (ret) { ath11k_err(ab, "failed to init core: %d\n", ret); @@ -1364,6 +1534,8 @@ static void ath11k_pci_remove(struct pci_dev *pdev) struct ath11k_base *ab = pci_get_drvdata(pdev); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); + if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_pci_power_down(ab); ath11k_debugfs_soc_destroy(ab); diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index f3e645891d19..61d67b20a0eb 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -68,6 +68,7 @@ enum ath11k_pci_flags { ATH11K_PCI_FLAG_INIT_DONE, ATH11K_PCI_FLAG_IS_MSI_64, ATH11K_PCI_ASPM_RESTORE, + ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, }; struct ath11k_pci { @@ -87,6 +88,8 @@ struct ath11k_pci { /* enum ath11k_pci_flags */ unsigned long flags; u16 link_ctl; + + unsigned long irq_flags; }; static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h index 619db001be8e..63fe5665badf 100644 --- a/drivers/net/wireless/ath/ath11k/peer.h +++ b/drivers/net/wireless/ath/ath11k/peer.h @@ -28,6 +28,7 @@ struct ath11k_peer { u8 ucast_keyidx; u16 sec_type; u16 sec_type_grp; + bool is_authorized; }; void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index fa73118de6db..65d3c6ba35ae 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ 
b/drivers/net/wireless/ath/ath11k/qmi.c @@ -9,6 +9,8 @@ #include "core.h" #include "debug.h" #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/ioport.h> #include <linux/firmware.h> #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 @@ -1582,11 +1584,55 @@ static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = { }, }; +static struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, + enablefwlog_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, + enablefwlog), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) { struct qmi_wlanfw_host_cap_req_msg_v01 req; struct qmi_wlanfw_host_cap_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); @@ -1640,6 +1686,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_host_cap_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send host capability request: %d\n", ret); goto out; } @@ -1705,6 +1752,7 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send indication register request: %d\n", ret); goto out; @@ -1734,7 +1782,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) { struct qmi_wlanfw_respond_mem_req_msg_v01 *req; struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0, i; bool delayed; @@ -1749,7 +1797,9 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) * failure to FW and FW will then request multiple blocks of small * chunk size memory.
*/ - if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) { + if (!(ab->bus_params.fixed_mem_region || + test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && + ab->qmi.target_mem_delayed) { delayed = true; ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n", ab->qmi.mem_seg_count); @@ -1783,6 +1833,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_respond_mem_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to respond qmi memory request: %d\n", ret); goto out; @@ -1815,10 +1866,12 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab) { int i; - if (ab->bus_params.fixed_mem_region) - return; - for (i = 0; i < ab->qmi.mem_seg_count; i++) { + if ((ab->bus_params.fixed_mem_region || + test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && + ab->qmi.target_mem[i].iaddr) + iounmap(ab->qmi.target_mem[i].iaddr); + if (!ab->qmi.target_mem[i].vaddr) continue; @@ -1866,10 +1919,44 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) { - int i, idx; + struct device *dev = ab->dev; + struct device_node *hremote_node = NULL; + struct resource res; + u32 host_ddr_sz; + int i, idx, ret; for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) { switch (ab->qmi.target_mem[i].type) { + case HOST_DDR_REGION_TYPE: + hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!hremote_node) { + ath11k_dbg(ab, ATH11K_DBG_QMI, + "qmi fail to get hremote_node\n"); + return -ENODEV; + } + + ret = of_address_to_resource(hremote_node, 0, &res); + if (ret) { + ath11k_dbg(ab, ATH11K_DBG_QMI, + "qmi fail to get reg from hremote\n"); + return ret; + } + + if (res.end - res.start + 1 < ab->qmi.target_mem[i].size) { + ath11k_dbg(ab, ATH11K_DBG_QMI, + "qmi fail to assign memory of sz\n"); + return -EINVAL; + } + + ab->qmi.target_mem[idx].paddr = res.start; + ab->qmi.target_mem[idx].iaddr = + ioremap(ab->qmi.target_mem[idx].paddr, + ab->qmi.target_mem[i].size); + ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; + host_ddr_sz = ab->qmi.target_mem[i].size; + ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; + idx++; + break; case BDF_MEM_REGION_TYPE: ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr; ab->qmi.target_mem[idx].vaddr = NULL; @@ -1884,10 +1971,16 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) } if (ath11k_cold_boot_cal && ab->hw_params.cold_boot_calib) { - ab->qmi.target_mem[idx].paddr = - ATH11K_QMI_CALDB_ADDRESS; - ab->qmi.target_mem[idx].vaddr = - (void *)ATH11K_QMI_CALDB_ADDRESS; + if (hremote_node) { + ab->qmi.target_mem[idx].paddr = + res.start + host_ddr_sz; + ab->qmi.target_mem[idx].iaddr = + ioremap(ab->qmi.target_mem[idx].paddr, + ab->qmi.target_mem[i].size); + } else { + ab->qmi.target_mem[idx].paddr = + ATH11K_QMI_CALDB_ADDRESS; + } } else { ab->qmi.target_mem[idx].paddr = 0; ab->qmi.target_mem[idx].vaddr = NULL; @@ -1911,7 +2004,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) { struct qmi_wlanfw_cap_req_msg_v01 req; struct qmi_wlanfw_cap_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; int r; @@ -1930,6 +2023,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_cap_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send qmi cap request: %d\n", ret);
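
The HOST_DDR_REGION_TYPE case added above is the heart of the fixed-memory-region support: resolve the memory-region phandle from DT, turn it into a physical resource, verify the reservation is large enough, then ioremap() it rather than DMA-allocating. A condensed sketch of that flow under the same assumptions (map_fw_reserved_mem() is a hypothetical helper, not driver code):

    #include <linux/device.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/of.h>
    #include <linux/of_address.h>

    static void __iomem *map_fw_reserved_mem(struct device *dev, size_t need,
    					 phys_addr_t *paddr)
    {
    	struct device_node *np;
    	struct resource res;
    	int ret;

    	np = of_parse_phandle(dev->of_node, "memory-region", 0);
    	if (!np)
    		return NULL;

    	ret = of_address_to_resource(np, 0, &res);
    	of_node_put(np);
    	if (ret || resource_size(&res) < need)
    		return NULL;

    	*paddr = res.start;
    	/* a mapping of the reserved DDR carve-out, no allocation needed */
    	return ioremap(res.start, need);
    }
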
goto out; @@ -2000,7 +2094,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, { struct qmi_wlanfw_bdf_download_req_msg_v01 *req; struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; const u8 *temp = data; void __iomem *bdf_addr = NULL; int ret; @@ -2111,7 +2205,8 @@ err_free_req: return ret; } -static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) +static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab, + bool regdb) { struct device *dev = ab->dev; char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE]; @@ -2122,13 +2217,21 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) const u8 *tmp; memset(&bd, 0, sizeof(bd)); - ret = ath11k_core_fetch_bdf(ab, &bd); - if (ret) { - ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret); - goto out; + + if (regdb) { + ret = ath11k_core_fetch_regdb(ab, &bd); + } else { + ret = ath11k_core_fetch_bdf(ab, &bd); + if (ret) + ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret); } - if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0) + if (ret) + goto out; + + if (regdb) + bdf_type = ATH11K_QMI_BDF_TYPE_REGDB; + else if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0) bdf_type = ATH11K_QMI_BDF_TYPE_ELF; else bdf_type = ATH11K_QMI_BDF_TYPE_BIN; @@ -2143,8 +2246,8 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) goto out; } - /* QCA6390 does not support cal data, skip it */ - if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF) + /* QCA6390/WCN6855 does not support cal data, skip it */ + if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB) goto out; if (ab->qmi.target.eeprom_caldata) { @@ -2245,7 +2348,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; struct qmi_wlanfw_m3_info_req_msg_v01 req; struct qmi_wlanfw_m3_info_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); @@ -2277,6 +2380,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, qmi_wlanfw_m3_info_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send m3 information request: %d\n", ret); goto out; @@ -2303,7 +2407,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, { struct qmi_wlanfw_wlan_mode_req_msg_v01 req; struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); @@ -2325,6 +2429,7 @@ static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n", mode, ret); goto out; @@ -2358,7 +2463,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; struct ce_pipe_config *ce_cfg; struct service_to_pipe *svc_cfg; - struct qmi_txn txn = {}; + struct qmi_txn txn; int ret = 0, pipe_num; ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; @@ -2419,6 +2524,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { + qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan config request: %d\n", ret); goto out; @@ -2442,6 +2548,48 @@ out: return ret; } +static int 
ath11k_qmi_wlanfw_wlan_ini_send(struct ath11k_base *ab, bool enable) +{ + int ret; + struct qmi_txn txn; + struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {}; + struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {}; + + req.enablefwlog_valid = true; + req.enablefwlog = enable ? 1 : 0; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_WLAN_INI_REQ_V01, + QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send wlan ini request, err = %d\n", + ret); + qmi_txn_cancel(&txn); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath11k_warn(ab, "qmi failed wlan ini request, err = %d\n", ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath11k_warn(ab, "qmi wlan ini request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + } + +out: + return ret; +} + void ath11k_qmi_firmware_stop(struct ath11k_base *ab) { int ret; @@ -2462,6 +2610,14 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab, ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware start\n"); + if (ab->hw_params.fw_wmi_diag_event) { + ret = ath11k_qmi_wlanfw_wlan_ini_send(ab, true); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send wlan fw ini:%d\n", ret); + return ret; + } + } + ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret); @@ -2573,7 +2729,10 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) return ret; } - ret = ath11k_qmi_load_bdf_qmi(ab); + if (ab->hw_params.supports_regdb) + ath11k_qmi_load_bdf_qmi(ab, true); + + ret = ath11k_qmi_load_bdf_qmi(ab, false); if (ret < 0) { ath11k_warn(ab, "failed to load board data file: %d\n", ret); return ret; @@ -2614,7 +2773,8 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, msg->mem_seg[i].type, msg->mem_seg[i].size); } - if (ab->bus_params.fixed_mem_region) { + if (ab->bus_params.fixed_mem_region || + test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { ret = ath11k_qmi_assign_target_mem_chunk(ab); if (ret) { ath11k_warn(ab, "failed to assign qmi target memory: %d\n", @@ -2823,7 +2983,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab) memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk)); ab->qmi.ab = ab; - ab->qmi.target_mem_mode = ATH11K_QMI_TARGET_MEM_MODE_DEFAULT; + ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode; ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX, &ath11k_qmi_ops, ath11k_qmi_msg_handlers); if (ret < 0) { diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 3bb0f9ef7996..ba2eff4d59cb 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -34,14 +34,13 @@ #define QMI_WLANFW_MAX_DATA_SIZE_V01 6144 #define ATH11K_FIRMWARE_MODE_OFF 4 -#define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0 #define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ) struct ath11k_base; enum ath11k_qmi_file_type { ATH11K_QMI_FILE_TYPE_BDF_GOLDEN, - ATH11K_QMI_FILE_TYPE_CALDATA, + ATH11K_QMI_FILE_TYPE_CALDATA = 2, ATH11K_QMI_FILE_TYPE_EEPROM, ATH11K_QMI_MAX_FILE_TYPE, }; @@ -49,6 +48,7 @@ enum ath11k_qmi_file_type { enum ath11k_qmi_bdf_type { ATH11K_QMI_BDF_TYPE_BIN = 0, ATH11K_QMI_BDF_TYPE_ELF = 1, + ATH11K_QMI_BDF_TYPE_REGDB = 4, }; enum 
ath11k_qmi_event_type { @@ -95,6 +95,7 @@ struct target_mem_chunk { u32 type; dma_addr_t paddr; u32 *vaddr; + void __iomem *iaddr; }; struct target_info { @@ -427,10 +428,12 @@ struct qmi_wlanfw_m3_info_resp_msg_v01 { #define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7 #define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803 #define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7 +#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 4 #define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022 #define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022 #define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023 #define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023 +#define QMI_WLANFW_WLAN_INI_REQ_V01 0x002F #define QMI_WLANFW_MAX_STR_LEN_V01 16 #define QMI_WLANFW_MAX_NUM_CE_V01 12 #define QMI_WLANFW_MAX_NUM_SVC_V01 24 @@ -472,6 +475,16 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 { struct qmi_response_type_v01 resp; }; +struct qmi_wlanfw_wlan_ini_req_msg_v01 { + /* Must be set to true if enablefwlog is being passed */ + u8 enablefwlog_valid; + u8 enablefwlog; +}; + +struct qmi_wlanfw_wlan_ini_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + int ath11k_qmi_firmware_start(struct ath11k_base *ab, u32 mode); void ath11k_qmi_firmware_stop(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index a66b5bdd2167..d6575feca5a2 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -2,6 +2,8 @@ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */ +#include <linux/rtnetlink.h> + #include "core.h" #include "debug.h" @@ -86,6 +88,9 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) if (ret) ath11k_warn(ar->ab, "INIT Country code set to fw failed : %d\n", ret); + + ath11k_mac_11d_scan_stop(ar); + ar->regdom_set_by_user = true; } int ath11k_reg_update_chan_list(struct ath11k *ar) @@ -179,6 +184,11 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params); kfree(params); + if (ar->pending_11d) { + complete(&ar->finish_11d_ch_list); + ar->pending_11d = false; + } + return ret; } @@ -244,8 +254,15 @@ int ath11k_regd_update(struct ath11k *ar) goto err; } + if (ar->pending_11d) + complete(&ar->finish_11d_scan); + rtnl_lock(); wiphy_lock(ar->hw->wiphy); + + if (ar->pending_11d) + reinit_completion(&ar->finish_11d_ch_list); + ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy); wiphy_unlock(ar->hw->wiphy); rtnl_unlock(); @@ -456,6 +473,9 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) { u16 bw; + if (end_freq <= start_freq) + return 0; + bw = end_freq - start_freq; bw = min_t(u16, bw, max_bw); @@ -463,8 +483,10 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) bw = 80; else if (bw >= 40 && bw < 80) bw = 40; - else if (bw < 40) + else if (bw >= 20 && bw < 40) bw = 20; + else + bw = 0; return bw; } @@ -488,73 +510,77 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab, struct cur_reg_rule *reg_rule, u8 *rule_idx, u32 flags, u16 max_bw) { + u32 start_freq; u32 end_freq; u16 bw; u8 i; i = *rule_idx; + /* there might be situations when even the input rule must be dropped */ + i--; + + /* frequencies below weather radar */ bw = ath11k_reg_adjust_bw(reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, max_bw); + if (bw > 0) { + i++; - ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq, - ETSI_WEATHER_RADAR_BAND_LOW, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + 
i, + reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH) - end_freq = ETSI_WEATHER_RADAR_BAND_HIGH; - else - end_freq = reg_rule->end_freq; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } - bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - max_bw); + /* weather radar frequencies */ + start_freq = max_t(u32, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW); + end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH); - i++; + bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw); + if (bw > 0) { + i++; - ath11k_reg_update_rule(regd->reg_rules + i, - ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, start_freq, + end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, flags); - regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; + regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (end_freq == reg_rule->end_freq) { - regd->n_reg_rules--; - *rule_idx = i; - return; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, start_freq, end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, flags); } + /* frequencies above weather radar */ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, max_bw); + if (bw > 0) { + i++; - i++; - - ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH, - reg_rule->end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, + ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. 
(%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } *rule_idx = i; } diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c index ac4da99b5577..4100cc1449a2 100644 --- a/drivers/net/wireless/ath/ath11k/spectral.c +++ b/drivers/net/wireless/ath/ath11k/spectral.c @@ -581,6 +581,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar, u16 length, freq; u8 chan_width_mhz, bin_sz; int ret; + u32 check_length; lockdep_assert_held(&ar->spectral.lock); @@ -614,6 +615,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar, return -EINVAL; } + check_length = sizeof(*fft_report) + (num_bins * ab->hw_params.spectral.fft_sz); + ret = ath11k_dbring_validate_buffer(ar, data, check_length); + if (ret) { + ath11k_warn(ar->ab, "found magic value in fft data, dropping\n"); + return ret; + } + ret = ath11k_spectral_pull_search(ar, data, &search); if (ret) { ath11k_warn(ab, "failed to pull search report %d\n", ret); @@ -747,6 +755,12 @@ static int ath11k_spectral_process_data(struct ath11k *ar, goto err; } + ret = ath11k_dbring_validate_buffer(ar, data, tlv_len); + if (ret) { + ath11k_warn(ar->ab, "found magic value in spectral summary, dropping\n"); + goto err; + } + summary = (struct spectral_summary_fft_report *)tlv; ath11k_spectral_pull_summary(ar, &param->meta, summary, &summ_rpt); diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c index f0cc49ba0387..6620650d7845 100644 --- a/drivers/net/wireless/ath/ath11k/trace.c +++ b/drivers/net/wireless/ath/ath11k/trace.c @@ -7,3 +7,4 @@ #define CREATE_TRACE_POINTS #include "trace.h" +EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg); diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h index 25d18e9d5b0b..a02e54735e88 100644 --- a/drivers/net/wireless/ath/ath11k/trace.h +++ b/drivers/net/wireless/ath/ath11k/trace.h @@ -14,12 +14,24 @@ #if !defined(CONFIG_ATH11K_TRACING) #undef TRACE_EVENT #define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} \ +static inline bool trace_##name##_enabled(void) \ +{ \ + return false; \ +} + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...)
\ static inline void trace_ ## name(proto) {} #endif /* !CONFIG_ATH11K_TRACING || __CHECKER__ */ #undef TRACE_SYSTEM #define TRACE_SYSTEM ath11k +#define ATH11K_MSG_MAX 400 + TRACE_EVENT(ath11k_htt_pktlog, TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len, u32 pktlog_checksum), @@ -108,6 +120,194 @@ TRACE_EVENT(ath11k_htt_rxdesc, ) ); +DECLARE_EVENT_CLASS(ath11k_log_event, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf), + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __dynamic_array(char, msg, ATH11K_MSG_MAX) + ), + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + ATH11K_MSG_MAX, + vaf->fmt, + *vaf->va) >= ATH11K_MSG_MAX); + ), + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) +); + +DEFINE_EVENT(ath11k_log_event, ath11k_log_err, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf) +); + +DEFINE_EVENT(ath11k_log_event, ath11k_log_warn, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf) +); + +DEFINE_EVENT(ath11k_log_event, ath11k_log_info, + TP_PROTO(struct ath11k_base *ab, struct va_format *vaf), + TP_ARGS(ab, vaf) +); + +TRACE_EVENT(ath11k_wmi_cmd, + TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len), + + TP_ARGS(ab, id, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s id %d len %zu", + __get_str(driver), + __get_str(device), + __entry->id, + __entry->buf_len + ) +); + +TRACE_EVENT(ath11k_wmi_event, + TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len), + + TP_ARGS(ab, id, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(unsigned int, id) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s id %d len %zu", + __get_str(driver), + __get_str(device), + __entry->id, + __entry->buf_len + ) +); + +TRACE_EVENT(ath11k_log_dbg, + TP_PROTO(struct ath11k_base *ab, unsigned int level, struct va_format *vaf), + + TP_ARGS(ab, level, vaf), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(unsigned int, level) + __dynamic_array(char, msg, ATH11K_MSG_MAX) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __entry->level = level; + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + ATH11K_MSG_MAX, vaf->fmt, + *vaf->va) >= ATH11K_MSG_MAX); + ), + + TP_printk( + "%s %s %s", + __get_str(driver), + __get_str(device), + __get_str(msg) + ) +); + +TRACE_EVENT(ath11k_log_dbg_dump, + TP_PROTO(struct ath11k_base *ab, const char *msg, const char *prefix, + const void *buf, size_t buf_len), + + 
TP_ARGS(ab, msg, prefix, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __string(msg, msg) + __string(prefix, prefix) + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __assign_str(msg, msg); + __assign_str(prefix, prefix); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "%s %s %s/%s\n", + __get_str(driver), + __get_str(device), + __get_str(prefix), + __get_str(msg) + ) +); + +TRACE_EVENT(ath11k_wmi_diag, + TP_PROTO(struct ath11k_base *ab, const void *data, size_t len), + + TP_ARGS(ab, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ab->dev)) + __string(driver, dev_driver_string(ab->dev)) + __field(u16, len) + __dynamic_array(u8, data, len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ab->dev)); + __assign_str(driver, dev_driver_string(ab->dev)); + __entry->len = len; + memcpy(__get_dynamic_array(data), data, len); + ), + + TP_printk( + "%s %s tlv diag len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ /* we don't want to use include/trace/events */ diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 5ae2ef4680d6..6b68ccf65e39 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -73,6 +73,14 @@ struct wmi_tlv_dma_buf_release_parse { bool meta_data_done; }; +struct wmi_tlv_fw_stats_parse { + const struct wmi_stats_event *ev; + const struct wmi_per_chain_rssi_stats *rssi; + struct ath11k_fw_stats *stats; + int rssi_num; + bool chain_rssi_done; +}; + static const struct wmi_tlv_policy wmi_tlv_policies[] = { [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 }, @@ -120,6 +128,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, [WMI_TAG_STATS_EVENT] = { .min_len = sizeof(struct wmi_stats_event) }, + [WMI_TAG_RFKILL_EVENT] = { + .min_len = sizeof(struct wmi_rfkill_state_change_ev) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, [WMI_TAG_HOST_SWFDA_EVENT] = { @@ -128,6 +138,12 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, + [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = { + .min_len = sizeof(struct wmi_obss_color_collision_event) }, + [WMI_TAG_11D_NEW_COUNTRY_EVENT] = { + .min_len = sizeof(struct wmi_11d_new_cc_ev) }, + [WMI_TAG_PER_CHAIN_RSSI_STATS] = { + .min_len = sizeof(struct wmi_per_chain_rssi_stats) }, }; #define PRIMAP(_hw_mode_) \ @@ -249,6 +265,8 @@ static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buf cmd_hdr = (struct wmi_cmd_hdr *)skb->data; cmd_hdr->cmd_id = cmd; + trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len); + memset(skb_cb, 0, sizeof(*skb_cb)); ret = ath11k_htc_send(&ab->htc, wmi->eid, skb); @@ -267,21 +285,39 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, { struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab; int ret = -EOPNOTSUPP; + struct ath11k_base *ab = wmi_sc->ab; might_sleep(); - wait_event_timeout(wmi_sc->tx_credits_wq, ({ - ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); + if 
(ab->hw_params.credit_flow) { + wait_event_timeout(wmi_sc->tx_credits_wq, ({ + ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); - if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags)) - ret = -ESHUTDOWN; + if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, + &wmi_sc->ab->dev_flags)) + ret = -ESHUTDOWN; - (ret != -EAGAIN); - }), WMI_SEND_TIMEOUT_HZ); + (ret != -EAGAIN); + }), WMI_SEND_TIMEOUT_HZ); + } else { + wait_event_timeout(wmi->tx_ce_desc_wq, ({ + ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); + + if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, + &wmi_sc->ab->dev_flags)) + ret = -ESHUTDOWN; + + (ret != -ENOBUFS); + }), WMI_SEND_TIMEOUT_HZ); + } if (ret == -EAGAIN) ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id); + if (ret == -ENOBUFS) + ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n", + cmd_id); + return ret; } @@ -315,6 +351,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, struct ath11k_pdev *pdev) { struct wmi_mac_phy_capabilities *mac_phy_caps; + struct ath11k_base *ab = wmi_handle->wmi_ab->ab; struct ath11k_band_cap *cap_band; struct ath11k_pdev_cap *pdev_cap = &pdev->cap; u32 phy_map; @@ -346,6 +383,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, pdev->pdev_id = mac_phy_caps->pdev_id; pdev_cap->supported_bands |= mac_phy_caps->supported_bands; pdev_cap->ampdu_density = mac_phy_caps->ampdu_density; + ab->target_pdev_ids[ab->target_pdev_count].supported_bands = + mac_phy_caps->supported_bands; + ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id; + ab->target_pdev_count++; /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from * band to band for a single radio, need to see how this should be @@ -485,6 +526,8 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab, cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; cap->num_msdu_desc = ev->num_msdu_desc; + ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info); + return 0; } @@ -1244,7 +1287,8 @@ int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, return ret; } -int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable) +int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, + enum wmi_sta_ps_mode psmode) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_ps_mode_cmd *cmd; @@ -1259,7 +1303,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable) cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; - cmd->sta_ps_mode = enable; + cmd->sta_ps_mode = psmode; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID); if (ret) { @@ -1269,7 +1313,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable) ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev set psmode %d vdev id %d\n", - enable, vdev_id); + psmode, vdev_id); return ret; } @@ -1612,6 +1656,15 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, void *ptr; int ret, len; size_t aligned_len = roundup(bcn->len, 4); + struct ieee80211_vif *vif; + struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id); + + if (!arvif) { + ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id); + return -EINVAL; + } + + vif = arvif->vif; len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; @@ -1624,8 +1677,12 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, 
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->tim_ie_offset = offs->tim_offset; - cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; - cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; + + if (vif->csa_active) { + cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; + cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; + } + cmd->buf_len = bcn->len; ptr = skb->data + sizeof(*cmd); @@ -1689,7 +1746,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar, tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, key_len_aligned); - memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); + if (arg->key_data) + memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); if (ret) { @@ -1762,7 +1820,7 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, cmd->peer_flags |= WMI_PEER_AUTH; if (param->need_ptk_4_way) { cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; - if (!hw_crypto_disabled) + if (!hw_crypto_disabled && param->is_assoc) cmd->peer_flags &= ~WMI_PEER_AUTH; } if (param->need_gtk_2_way) @@ -2069,7 +2127,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, void *ptr; int i, ret, len; u32 *tmp_ptr; - u8 extraie_len_with_pad = 0; + u16 extraie_len_with_pad = 0; struct hint_short_ssid *s_ssid = NULL; struct hint_bssid *hint_bssid = NULL; @@ -2088,7 +2146,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, len += sizeof(*bssid) * params->num_bssid; len += TLV_HDR_SIZE; - if (params->extraie.len) + if (params->extraie.len && params->extraie.len <= 0xFFFF) extraie_len_with_pad = roundup(params->extraie.len, sizeof(u32)); len += extraie_len_with_pad; @@ -2137,6 +2195,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, cmd->num_ssids = params->num_ssids; cmd->ie_len = params->extraie.len; cmd->n_probes = params->n_probes; + ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr); + ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr); ptr += sizeof(*cmd); @@ -2195,7 +2255,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; - if (params->extraie.len) + if (extraie_len_with_pad) memcpy(ptr, params->extraie.ptr, params->extraie.len); @@ -2386,6 +2446,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, tchan_info->reg_class_id); *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, tchan_info->antennamax); + *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, + tchan_info->maxregpower); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", @@ -2754,6 +2816,42 @@ out: return ret; } +int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar, + struct wmi_set_current_country_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_set_current_country_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_set_current_country_cmd *)skb->data; + cmd->tlv_header = + FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->pdev_id = ar->pdev->pdev_id; + memcpy(&cmd->new_alpha2, &param->alpha2, 3); + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "set current country pdev id %d alpha2 %c%c\n",
ar->pdev->pdev_id, + param->alpha2[0], + param->alpha2[1]); + + if (ret) { + ath11k_warn(ar->ab, + "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret); + dev_kfree_skb(skb); + } + + return ret; +} + int ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar, struct thermal_mitigation_params *param) @@ -2818,6 +2916,75 @@ ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar, return ret; } +int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar, + struct wmi_11d_scan_start_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_11d_scan_start_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_11d_scan_start_cmd *)skb->data; + cmd->tlv_header = + FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = param->vdev_id; + cmd->scan_period_msec = param->scan_period_msec; + cmd->start_interval_msec = param->start_interval_msec; + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "send 11d scan start vdev id %d period %d ms interval %d ms\n", + cmd->vdev_id, + cmd->scan_period_msec, + cmd->start_interval_msec); + + if (ret) { + ath11k_warn(ar->ab, + "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_11d_scan_stop_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_11d_scan_stop_cmd *)skb->data; + cmd->tlv_header = + FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "send 11d scan stop vdev id %d\n", + cmd->vdev_id); + + if (ret) { + ath11k_warn(ar->ab, + "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret); + dev_kfree_skb(skb); + } + + return ret; +} + int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter) { struct ath11k_pdev_wmi *wmi = ar->wmi; @@ -3428,6 +3595,56 @@ int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, } static void +ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + const void **tb; + const struct wmi_obss_color_collision_event *ev; + struct ath11k_vif *arvif; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + rcu_read_lock(); + + ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch obss color collision ev"); + goto exit; + } + + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); + if (!arvif) { + ath11k_warn(ab, "failed to find arvif with vdev id %d in obss_color_collision_event\n", + ev->vdev_id); + goto exit; + } + + switch (ev->evt_type) { + case WMI_BSS_COLOR_COLLISION_DETECTION: + ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap); + ath11k_dbg(ab, ATH11K_DBG_WMI, + "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n", + ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); + break; + case WMI_BSS_COLOR_COLLISION_DISABLE: + case
WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: + case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: + break; + default: + ath11k_warn(ab, "received unknown obss color collision detection event\n"); + } + +exit: + kfree(tb); + rcu_read_unlock(); +} + +static void ath11k_fill_band_to_mac_param(struct ath11k_base *soc, struct wmi_host_pdev_band_to_mac *band_to_mac) { @@ -4144,6 +4361,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc, svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy; soc->num_radios = 0; + soc->target_pdev_count = 0; phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map; while (phy_id_map && soc->num_radios < MAX_RADIOS) { @@ -4781,6 +4999,7 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id, struct sk_buff *msdu; struct ieee80211_tx_info *info; struct ath11k_skb_cb *skb_cb; + int num_mgmt; spin_lock_bh(&ar->txmgmt_idr_lock); msdu = idr_find(&ar->txmgmt_idr, desc_id); @@ -4804,10 +5023,19 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id, ieee80211_tx_status_irqsafe(ar->hw, msdu); + num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); + /* WARN when we received this event without doing any mgmt tx */ - if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0) + if (num_mgmt < 0) WARN_ON_ONCE(1); + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "wmi mgmt tx comp pending %d desc id %d\n", + num_mgmt, desc_id); + + if (!num_mgmt) + wake_up(&ar->txmgmt_empty_waitq); + return 0; } @@ -5343,47 +5571,107 @@ ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src, dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt; } -int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, - struct ath11k_fw_stats *stats) +static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) { - const void **tb; - const struct wmi_stats_event *ev; - const void *data; - int i, ret; - u32 len = skb->len; + struct wmi_tlv_fw_stats_parse *parse = data; + const struct wmi_stats_event *ev = parse->ev; + struct ath11k_fw_stats *stats = parse->stats; + struct ath11k *ar; + struct ath11k_vif *arvif; + struct ieee80211_sta *sta; + struct ath11k_sta *arsta; + const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr; + int j, ret = 0; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC); - if (IS_ERR(tb)) { - ret = PTR_ERR(tb); - ath11k_warn(ab, "failed to parse tlv: %d\n", ret); - return ret; + if (tag != WMI_TAG_RSSI_STATS) + return -EPROTO; + + rcu_read_lock(); + + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); + stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi stats vdev id %d mac %pM\n", + stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr); + + arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id); + if (!arvif) { + ath11k_warn(ab, "not found vif for vdev id %d\n", + stats_rssi->vdev_id); + ret = -EPROTO; + goto exit; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi stats bssid %pM vif %pK\n", + arvif->bssid, arvif->vif); + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, + arvif->bssid, + NULL); + if (!sta) { + ath11k_warn(ab, "not found station for bssid %pM\n", + arvif->bssid); + ret = -EPROTO; + goto exit; + } + + arsta = (struct ath11k_sta *)sta->drv_priv; + + BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > + ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); + + for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) { + arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j]; + ath11k_dbg(ab, 
ATH11K_DBG_WMI, + "wmi stats beacon rssi[%d] %d data rssi[%d] %d\n", + j, + stats_rssi->rssi_avg_beacon[j], + j, + stats_rssi->rssi_avg_data[j]); } - ev = tb[WMI_TAG_STATS_EVENT]; - data = tb[WMI_TAG_ARRAY_BYTE]; - if (!ev || !data) { +exit: + rcu_read_unlock(); + return ret; +} + +static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, + struct wmi_tlv_fw_stats_parse *parse, + const void *ptr, + u16 len) +{ + struct ath11k_fw_stats *stats = parse->stats; + const struct wmi_stats_event *ev = parse->ev; + struct ath11k *ar; + struct ath11k_vif *arvif; + struct ieee80211_sta *sta; + struct ath11k_sta *arsta; + int i, ret = 0; + const void *data = ptr; + + if (!ev) { ath11k_warn(ab, "failed to fetch update stats ev"); - kfree(tb); return -EPROTO; } - ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n", - ev->pdev_id, - ev->num_pdev_stats, ev->num_vdev_stats, - ev->num_bcn_stats); - - stats->pdev_id = ev->pdev_id; stats->stats_id = 0; + rcu_read_lock(); + + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); + for (i = 0; i < ev->num_pdev_stats; i++) { const struct wmi_pdev_stats *src; struct ath11k_fw_stats_pdev *dst; src = data; if (len < sizeof(*src)) { - kfree(tb); - return -EPROTO; + ret = -EPROTO; + goto exit; } stats->stats_id = WMI_REQUEST_PDEV_STAT; @@ -5407,12 +5695,29 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, src = data; if (len < sizeof(*src)) { - kfree(tb); - return -EPROTO; + ret = -EPROTO; + goto exit; } stats->stats_id = WMI_REQUEST_VDEV_STAT; + arvif = ath11k_mac_get_arvif(ar, src->vdev_id); + if (arvif) { + sta = ieee80211_find_sta_by_ifaddr(ar->hw, + arvif->bssid, + NULL); + if (sta) { + arsta = (struct ath11k_sta *)sta->drv_priv; + arsta->rssi_beacon = src->beacon_snr; + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi stats vdev id %d snr %d\n", + src->vdev_id, src->beacon_snr); + } else { + ath11k_warn(ab, "not found station for bssid %pM\n", + arvif->bssid); + } + } + data += sizeof(*src); len -= sizeof(*src); @@ -5430,8 +5735,8 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, src = data; if (len < sizeof(*src)) { - kfree(tb); - return -EPROTO; + ret = -EPROTO; + goto exit; } stats->stats_id = WMI_REQUEST_BCN_STAT; @@ -5447,8 +5752,67 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, list_add_tail(&dst->list, &stats->bcn); } - kfree(tb); - return 0; +exit: + rcu_read_unlock(); + return ret; +} + +static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_fw_stats_parse *parse = data; + int ret = 0; + + switch (tag) { + case WMI_TAG_STATS_EVENT: + parse->ev = (struct wmi_stats_event *)ptr; + parse->stats->pdev_id = parse->ev->pdev_id; + break; + case WMI_TAG_ARRAY_BYTE: + ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); + break; + case WMI_TAG_PER_CHAIN_RSSI_STATS: + parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr; + + if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT) + parse->rssi_num = parse->rssi->num_per_chain_rssi_stats; + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi stats id 0x%x num chain %d\n", + parse->ev->stats_id, + parse->rssi_num); + break; + case WMI_TAG_ARRAY_STRUCT: + if (parse->rssi_num && !parse->chain_rssi_done) { + ret = ath11k_wmi_tlv_iter(ab, ptr, len, + ath11k_wmi_tlv_rssi_chain_parse, + parse); + if (ret) { + ath11k_warn(ab, "failed to parse rssi chain %d\n", + ret); + return ret; + } + parse->chain_rssi_done = true; + } 
+ break; + default: + break; + } + return ret; +} + +int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, + struct ath11k_fw_stats *stats) +{ + struct wmi_tlv_fw_stats_parse parse = { }; + + stats->stats_id = 0; + parse.stats = stats; + + return ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_fw_stats_parse, + &parse); } size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head) @@ -5810,15 +6174,79 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) wake_up(&ab->wmi_ab.tx_credits_wq); } +static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + const struct wmi_11d_new_cc_ev *ev; + const void **tb; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT]; + if (!ev) { + kfree(tb); + ath11k_warn(ab, "failed to fetch 11d new cc ev"); + return -EPROTO; + } + + spin_lock_bh(&ab->base_lock); + memcpy(&ab->new_alpha2, &ev->new_alpha2, 2); + spin_unlock_bh(&ab->base_lock); + + ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi 11d new cc %c%c\n", + ab->new_alpha2[0], + ab->new_alpha2[1]); + + kfree(tb); + + queue_work(ab->workqueue, &ab->update_11d_work); + + return 0; +} + static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab, struct sk_buff *skb) { + struct ath11k_pdev_wmi *wmi = NULL; + u32 i; + u8 wmi_ep_count; + u8 eid; + + eid = ATH11K_SKB_CB(skb)->eid; dev_kfree_skb(skb); + + if (eid >= ATH11K_HTC_EP_COUNT) + return; + + wmi_ep_count = ab->htc.wmi_ep_count; + if (wmi_ep_count > ab->hw_params.max_radios) + return; + + for (i = 0; i < ab->htc.wmi_ep_count; i++) { + if (ab->wmi_ab.wmi[i].eid == eid) { + wmi = &ab->wmi_ab.wmi[i]; + break; + } + } + + if (wmi) + wake_up(&wmi->tx_ce_desc_wq); } static bool ath11k_reg_is_world_alpha(char *alpha) { - return alpha[0] == '0' && alpha[1] == '0'; + if (alpha[0] == '0' && alpha[1] == '0') + return true; + + if (alpha[0] == 'n' && alpha[1] == 'a') + return true; + + return false; } static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb) @@ -5911,7 +6339,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk ar = ab->pdevs[pdev_idx].ar; kfree(ab->new_regd[pdev_idx]); ab->new_regd[pdev_idx] = regd; - ieee80211_queue_work(ar->hw, &ar->regd_update_work); + queue_work(ab->workqueue, &ar->regd_update_work); } else { /* This regd would be applied during mac registration and is * held constant throughout for regd intersection purpose @@ -6111,6 +6539,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) { + struct ath11k_vif *arvif; u32 vdev_id, tx_status; if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, @@ -6118,6 +6547,17 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s ath11k_warn(ab, "failed to extract bcn tx status"); return; } + + rcu_read_lock(); + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id); + if (!arvif) { + ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status", + vdev_id); + rcu_read_unlock(); + return; + } + ath11k_mac_bcn_tx_event(arvif); + rcu_read_unlock(); } static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb) @@ -6398,6 +6838,7 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff struct ieee80211_sta *sta; 
struct ath11k_peer *peer; struct ath11k *ar; + u32 vdev_id; if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { ath11k_warn(ab, "failed to extract peer sta kickout event"); @@ -6413,10 +6854,15 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff if (!peer) { ath11k_warn(ab, "peer not found %pM\n", arg.mac_addr); + spin_unlock_bh(&ab->base_lock); goto exit; } - ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); + vdev_id = peer->vdev_id; + + spin_unlock_bh(&ab->base_lock); + + ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d", vdev_id); @@ -6437,7 +6883,6 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff ieee80211_report_low_ack(sta, 10); exit: - spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); } @@ -6893,7 +7338,40 @@ exit: kfree(tb); } +static void ath11k_rfkill_state_change_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_rfkill_state_change_ev *ev; + const void **tb; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_RFKILL_EVENT]; + if (!ev) { + kfree(tb); + return; + } + + ath11k_dbg(ab, ATH11K_DBG_MAC, + "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", + ev->gpio_pin_num, + ev->int_type, + ev->radio_state); + + spin_lock_bh(&ab->base_lock); + ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON); + spin_unlock_bh(&ab->base_lock); + + queue_work(ab->workqueue, &ab->rfkill_work); + kfree(tb); +} + static void ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, struct sk_buff *skb) @@ -7046,6 +7525,13 @@ static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_b complete(&ab->wow.wakeup_completed); } +static void +ath11k_wmi_diag_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + trace_ath11k_wmi_diag(ab, skb->data, skb->len); +} + static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -7054,6 +7540,8 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id)); + trace_ath11k_wmi_event(ab, id, skb->data, skb->len); + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) goto out; @@ -7138,6 +7626,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: ath11k_probe_resp_tx_status_event(ab, skb); break; + case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: + ath11k_wmi_obss_color_collision_event(ab, skb); + break; /* add Unsupported events here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: @@ -7157,6 +7648,15 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_WOW_WAKEUP_HOST_EVENTID: ath11k_wmi_event_wow_wakeup_host(ab, skb); break; + case WMI_11D_NEW_COUNTRY_EVENTID: + ath11k_reg_11d_new_cc_event(ab, skb); + break; + case WMI_RFKILL_STATE_CHANGE_EVENTID: + ath11k_rfkill_state_change_event(ab, skb); + break; + case WMI_DIAG_EVENTID: + ath11k_wmi_diag_event(ab, skb); + break; /* TODO: Add remaining events */ default: ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id); @@ -7199,6 +7699,7 @@ static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; + init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq); return 0; } @@ -7414,3 +7915,31 @@ int ath11k_wmi_wow_enable(struct ath11k *ar) return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); } + +int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, + const u8 mac_addr[ETH_ALEN]) +{ + struct sk_buff *skb; + struct wmi_scan_prob_req_oui_cmd *cmd; + u32 prob_req_oui; + int len; + + prob_req_oui = (((u32)mac_addr[0]) << 16) | + (((u32)mac_addr[1]) << 8) | mac_addr[2]; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_SCAN_PROB_REQ_OUI_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->prob_req_oui = prob_req_oui; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi scan prob req oui %d\n", + prob_req_oui); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 0584e68e7593..2f26ec1a8aa3 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -113,10 +113,10 @@ enum wmi_host_hw_mode_priority { WMI_HOST_HW_MODE_MAX_PRI }; -enum { +enum WMI_HOST_WLAN_BAND { WMI_HOST_WLAN_2G_CAP = 0x1, WMI_HOST_WLAN_5G_CAP = 0x2, - WMI_HOST_WLAN_2G_5G_CAP = 0x3, + WMI_HOST_WLAN_2G_5G_CAP = WMI_HOST_WLAN_2G_CAP | WMI_HOST_WLAN_5G_CAP, }; /* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command. @@ -774,6 +774,8 @@ enum wmi_tlv_event_id { WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL), WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL), WMI_SAP_OFL_DEL_STA_EVENTID, + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID = + WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL), WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB), WMI_OCB_GET_TSF_TIMER_RESP_EVENTID, WMI_DCC_GET_STATS_RESP_EVENTID, @@ -2167,6 +2169,13 @@ enum wmi_nss_ratio { WMI_NSS_RATIO_2_NSS = 0x3, }; +enum wmi_dtim_policy { + WMI_DTIM_POLICY_IGNORE = 1, + WMI_DTIM_POLICY_NORMAL = 2, + WMI_DTIM_POLICY_STICK = 3, + WMI_DTIM_POLICY_AUTO = 4, +}; + struct wmi_host_pdev_band_to_mac { u32 pdev_id; u32 start_freq; @@ -2522,6 +2531,7 @@ struct ath11k_pdev_wmi { enum ath11k_htc_ep_id eid; const struct wmi_peer_flags_map *peer_flags; u32 rx_decap_mode; + wait_queue_head_t tx_ce_desc_wq; }; struct vdev_create_params { @@ -3079,7 +3089,6 @@ enum scan_dwelltime_adaptive_mode { #define WLAN_SCAN_MAX_NUM_SSID 10 #define WLAN_SCAN_MAX_NUM_BSSID 10 -#define WLAN_SCAN_MAX_NUM_CHANNELS 40 #define WLAN_SSID_MAX_LEN 32 @@ -3300,7 +3309,7 @@ struct scan_req_params { u32 num_bssid; u32 num_ssids; u32 n_probes; - u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS]; + u32 *chan_list; u32 notify_scan_events; struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID]; struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID]; @@ -3311,6 +3320,8 @@ struct scan_req_params { u32 num_hint_bssid; struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID]; struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID]; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; }; struct wmi_ssid_arg { @@ -3617,6 +3628,7 @@ struct peer_assoc_params { u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET]; bool twt_responder; bool twt_requester; + bool is_assoc; struct ath11k_ppe_threshold peer_ppet; }; 
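The new ath11k_wmi_scan_prob_req_oui() helper above packs the first three octets of the randomized scan MAC address into the low 24 bits of a u32 before sending WMI_SCAN_PROB_REQ_OUI_CMDID. A minimal standalone sketch of that packing, for illustration only (pack_prob_req_oui() is a made-up name and the sample address is arbitrary):

#include <stdint.h>

/* Illustration of the OUI packing done in ath11k_wmi_scan_prob_req_oui():
 * mac[0] is the most significant of the three OUI octets.
 */
static uint32_t pack_prob_req_oui(const uint8_t mac[6])
{
	return ((uint32_t)mac[0] << 16) | ((uint32_t)mac[1] << 8) | mac[2];
}

/* Example: a randomized address 02:03:7f:aa:bb:cc packs to 0x02037f. */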
@@ -3673,6 +3685,11 @@ struct wmi_scan_chan_list_cmd { u32 pdev_id; } __packed; +struct wmi_scan_prob_req_oui_cmd { + u32 tlv_header; + u32 prob_req_oui; +} __packed; + #define WMI_MGMT_SEND_DOWNLD_LEN 64 #define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0) @@ -3766,6 +3783,16 @@ struct stats_request_params { u32 pdev_id; }; +struct wmi_set_current_country_params { + u8 alpha2[3]; +}; + +struct wmi_set_current_country_cmd { + u32 tlv_header; + u32 pdev_id; + u32 new_alpha2; +} __packed; + enum set_init_cc_type { WMI_COUNTRY_INFO_TYPE_ALPHA, WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE, @@ -3799,6 +3826,28 @@ struct wmi_init_country_cmd { } cc_info; } __packed; +struct wmi_11d_scan_start_params { + u32 vdev_id; + u32 scan_period_msec; + u32 start_interval_msec; +}; + +struct wmi_11d_scan_start_cmd { + u32 tlv_header; + u32 vdev_id; + u32 scan_period_msec; + u32 start_interval_msec; +} __packed; + +struct wmi_11d_scan_stop_cmd { + u32 tlv_header; + u32 vdev_id; +} __packed; + +struct wmi_11d_new_cc_ev { + u32 new_alpha2; +} __packed; + #define THERMAL_LEVELS 1 struct tt_level_config { u32 tmplwm; @@ -4390,6 +4439,17 @@ struct wmi_stats_event { u32 num_peer_extd2_stats; } __packed; +struct wmi_rssi_stats { + u32 vdev_id; + u32 rssi_avg_beacon[WMI_MAX_CHAINS]; + u32 rssi_avg_data[WMI_MAX_CHAINS]; + struct wmi_mac_addr peer_macaddr; +} __packed; + +struct wmi_per_chain_rssi_stats { + u32 num_per_chain_rssi_stats; +} __packed; + struct wmi_pdev_ctl_failsafe_chk_event { u32 pdev_id; u32 ctl_failsafe_status; @@ -4914,6 +4974,13 @@ struct wmi_pdev_obss_pd_bitmap_cmd { #define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000 #define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000 +enum wmi_bss_color_collision { + WMI_BSS_COLOR_COLLISION_DISABLE = 0, + WMI_BSS_COLOR_COLLISION_DETECTION, + WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY, + WMI_BSS_COLOR_FREE_SLOT_AVAILABLE, +}; + struct wmi_obss_color_collision_cfg_params_cmd { u32 tlv_header; u32 vdev_id; @@ -4931,6 +4998,12 @@ struct wmi_bss_color_change_enable_params_cmd { u32 enable; } __packed; +struct wmi_obss_color_collision_event { + u32 vdev_id; + u32 evt_type; + u64 obss_color_bitmap; +} __packed; + #define ATH11K_IPV4_TH_SEED_SIZE 5 #define ATH11K_IPV6_TH_SEED_SIZE 11 @@ -5142,6 +5215,31 @@ struct target_resource_config { u32 twt_ap_sta_count; }; +enum wmi_sys_cap_info_flags { + WMI_SYS_CAP_INFO_RXTX_LED = BIT(0), + WMI_SYS_CAP_INFO_RFKILL = BIT(1), +}; + +#define WMI_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0) +#define WMI_RFKILL_CFG_RADIO_LEVEL BIT(6) +#define WMI_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7) + +enum wmi_rfkill_enable_radio { + WMI_RFKILL_ENABLE_RADIO_ON = 0, + WMI_RFKILL_ENABLE_RADIO_OFF = 1, +}; + +enum wmi_rfkill_radio_state { + WMI_RFKILL_RADIO_STATE_OFF = 1, + WMI_RFKILL_RADIO_STATE_ON = 2, +}; + +struct wmi_rfkill_state_change_ev { + u32 gpio_pin_num; + u32 int_type; + u32 radio_state; +} __packed; + #define WMI_MAX_MEM_REQS 32 #define MAX_RADIOS 3 @@ -5351,7 +5449,8 @@ int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr, u32 vdev_id, u32 param_id, u32 param_val); int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, u32 param_value, u8 pdev_id); -int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable); +int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, + enum wmi_sta_ps_mode psmode); int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab); int ath11k_wmi_cmd_init(struct ath11k_base *ab); int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab); @@ -5415,9 +5514,16 @@ 
int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 initiator, u32 reason); int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, u32 vdev_id, u32 bcn_ctrl_op); +int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar, + struct wmi_set_current_country_params *param); int ath11k_wmi_send_init_country_cmd(struct ath11k *ar, struct wmi_init_country_params init_cc_param); + +int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar, + struct wmi_11d_scan_start_params *param); +int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id); + int ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar, struct thermal_mitigation_params *param); @@ -5474,5 +5580,6 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, enum wmi_host_hw_mode_config_type mode); int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar); int ath11k_wmi_wow_enable(struct ath11k *ar); - +int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, + const u8 mac_addr[ETH_ALEN]); #endif diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index cef17f33c69e..66d123f48085 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -727,6 +727,43 @@ ath5k_get_rate_hw_value(const struct ieee80211_hw *hw, return hw_rate; } +static bool ath5k_merge_ratetbl(struct ieee80211_sta *sta, + struct ath5k_buf *bf, + struct ieee80211_tx_info *tx_info) +{ + struct ieee80211_sta_rates *ratetbl; + u8 i; + + if (!sta) + return false; + + ratetbl = rcu_dereference(sta->rates); + if (!ratetbl) + return false; + + if (tx_info->control.rates[0].idx < 0 || + tx_info->control.rates[0].count == 0) + { + i = 0; + } else { + bf->rates[0] = tx_info->control.rates[0]; + i = 1; + } + + for ( ; i < IEEE80211_TX_MAX_RATES; i++) { + bf->rates[i].idx = ratetbl->rate[i].idx; + bf->rates[i].flags = ratetbl->rate[i].flags; + if (tx_info->control.use_rts) + bf->rates[i].count = ratetbl->rate[i].count_rts; + else if (tx_info->control.use_cts_prot) + bf->rates[i].count = ratetbl->rate[i].count_cts; + else + bf->rates[i].count = ratetbl->rate[i].count; + } + + return true; +} + static int ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, struct ath5k_txq *txq, int padsize, @@ -737,6 +774,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; struct ieee80211_rate *rate; + struct ieee80211_sta *sta; unsigned int mrr_rate[3], mrr_tries[3]; int i, ret; u16 hw_rate; @@ -753,8 +791,16 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, if (dma_mapping_error(ah->dev, bf->skbaddr)) return -ENOSPC; - ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates, - ARRAY_SIZE(bf->rates)); + if (control) + sta = control->sta; + else + sta = NULL; + + if (!ath5k_merge_ratetbl(sta, bf, info)) { + ieee80211_get_tx_rates(info->control.vif, + sta, skb, bf->rates, + ARRAY_SIZE(bf->rates)); + } rate = ath5k_get_rate(ah->hw, info, bf, 0); diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h index 112d8a9b8d43..d3534a29c4f0 100644 --- a/drivers/net/wireless/ath/ath6kl/htc.h +++ b/drivers/net/wireless/ath/ath6kl/htc.h @@ -153,12 +153,19 @@ * implementations. 
*/ struct htc_frame_hdr { - u8 eid; - u8 flags; - - /* length of data (including trailer) that follows the header */ - __le16 payld_len; - + struct_group_tagged(htc_frame_look_ahead, header, + union { + struct { + u8 eid; + u8 flags; + + /* length of data (including trailer) that follows the header */ + __le16 payld_len; + + }; + u32 word; + }; + ); /* end of 4-byte lookahead */ u8 ctrl[2]; diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index 998947ef63b6..e3874421c4c0 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -2260,19 +2260,16 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target) { struct htc_packet *packet = NULL; - struct htc_frame_hdr *htc_hdr; - u32 look_ahead; + struct htc_frame_look_ahead look_ahead; - if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead, + if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead.word, HTC_TARGET_RESPONSE_TIMEOUT)) return NULL; ath6kl_dbg(ATH6KL_DBG_HTC, - "htc rx wait ctrl look_ahead 0x%X\n", look_ahead); - - htc_hdr = (struct htc_frame_hdr *)&look_ahead; + "htc rx wait ctrl look_ahead 0x%X\n", look_ahead.word); - if (htc_hdr->eid != ENDPOINT_0) + if (look_ahead.eid != ENDPOINT_0) return NULL; packet = htc_get_control_buf(target, false); @@ -2281,8 +2278,8 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target) return NULL; packet->info.rx.rx_flags = 0; - packet->info.rx.exp_hdr = look_ahead; - packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH; + packet->info.rx.exp_hdr = look_ahead.word; + packet->act_len = le16_to_cpu(look_ahead.payld_len) + HTC_HDR_LENGTH; if (packet->act_len > packet->buf_len) goto fail_ctrl_rx; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index ce9a0a53771e..fba5a847c3bb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -120,7 +120,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, AR_ISR_TXEOL); } - ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); + ah->intr_txqs = MS(s0_s, AR_ISR_S0_QCU_TXOK); ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 7e27a06e5df1..dc24da1ff00b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1005,24 +1005,20 @@ static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah, int i, int nmeasurement) { struct ath_common *common = ath9k_hw_common(ah); - int im, ix, iy, temp; + int im, ix, iy; for (im = 0; im < nmeasurement; im++) { for (ix = 0; ix < MAXIQCAL - 1; ix++) { for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) { if (coeff->mag_coeff[i][im][iy] < coeff->mag_coeff[i][im][ix]) { - temp = coeff->mag_coeff[i][im][ix]; - coeff->mag_coeff[i][im][ix] = - coeff->mag_coeff[i][im][iy]; - coeff->mag_coeff[i][im][iy] = temp; + swap(coeff->mag_coeff[i][im][ix], + coeff->mag_coeff[i][im][iy]); } if (coeff->phs_coeff[i][im][iy] < coeff->phs_coeff[i][im][ix]) { - temp = coeff->phs_coeff[i][im][ix]; - coeff->phs_coeff[i][im][ix] = - coeff->phs_coeff[i][im][iy]; - coeff->phs_coeff[i][im][iy] = temp; + swap(coeff->phs_coeff[i][im][ix], + coeff->phs_coeff[i][im][iy]); } } } 
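The calibration change above swaps open-coded three-statement exchanges for the kernel's swap() helper without changing the sort itself. A toy, self-contained version of the same sort-with-swap() shape (in the kernel, swap() comes from <linux/minmax.h>; it is re-defined here only so the sketch stands alone):

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* Same selection-style in-place sort as __ar955x_tx_iq_cal_sort(),
 * reduced to a single coefficient column.
 */
static void sort_coeff_column(int *coeff, int nmeasurement)
{
	int ix, iy;

	for (ix = 0; ix < nmeasurement - 1; ix++)
		for (iy = ix + 1; iy <= nmeasurement - 1; iy++)
			if (coeff[iy] < coeff[ix])
				swap(coeff[ix], coeff[iy]);
}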
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 860da13bfb6a..f06eec99de68 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -590,6 +590,13 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, return; } + if (pkt_len > 2 * MAX_RX_BUF_SIZE) { + dev_err(&hif_dev->udev->dev, + "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); + RX_STAT_INC(skb_dropped); + return; + } + pad_len = 4 - (pkt_len & 0x3); if (pad_len == 4) pad_len = 0; diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 0a1634238e67..6b45e63fae4b 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -281,6 +281,7 @@ struct ath9k_htc_rxbuf { struct ath9k_htc_rx { struct list_head rxbuf; spinlock_t rxbuflock; + bool initialized; }; #define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */ @@ -305,6 +306,7 @@ struct ath9k_htc_tx { DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM); struct timer_list cleanup_timer; spinlock_t tx_lock; + bool initialized; }; struct ath9k_htc_tx_ctl { diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 8e69e8989f6d..6a850a0bfa8a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -813,6 +813,11 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv) skb_queue_head_init(&priv->tx.data_vi_queue); skb_queue_head_init(&priv->tx.data_vo_queue); skb_queue_head_init(&priv->tx.tx_failed); + + /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */ + smp_wmb(); + priv->tx.initialized = true; + return 0; } @@ -1130,6 +1135,10 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb, struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL; unsigned long flags; + /* Check if ath9k_rx_init() completed. */ + if (!data_race(priv->rx.initialized)) + goto err; + spin_lock_irqsave(&priv->rx.rxbuflock, flags); list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) { if (!tmp_buf->in_process) { @@ -1185,6 +1194,10 @@ int ath9k_rx_init(struct ath9k_htc_priv *priv) list_add_tail(&rxbuf->list, &priv->rx.rxbuf); } + /* Allow ath9k_htc_rxep() to operate. */ + smp_wmb(); + priv->rx.initialized = true; + return 0; err: diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index fe29ad4b9023..f315c54bd3ac 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -169,6 +169,10 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t) &wmi->drv_priv->fatal_work); break; case WMI_TXSTATUS_EVENTID: + /* Check if ath9k_tx_init() completed. 
*/ + if (!data_race(priv->tx.initialized)) + break; + spin_lock_bh(&priv->tx.tx_lock); if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { spin_unlock_bh(&priv->tx.tx_lock); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 5691bd6eb82c..d0caf1de2bde 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -154,11 +154,52 @@ static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) seqno << IEEE80211_SEQ_SEQ_SHIFT); } +static bool ath_merge_ratetbl(struct ieee80211_sta *sta, struct ath_buf *bf, + struct ieee80211_tx_info *tx_info) +{ + struct ieee80211_sta_rates *ratetbl; + u8 i; + + if (!sta) + return false; + + ratetbl = rcu_dereference(sta->rates); + if (!ratetbl) + return false; + + if (tx_info->control.rates[0].idx < 0 || + tx_info->control.rates[0].count == 0) + { + i = 0; + } else { + bf->rates[0] = tx_info->control.rates[0]; + i = 1; + } + + for ( ; i < IEEE80211_TX_MAX_RATES; i++) { + bf->rates[i].idx = ratetbl->rate[i].idx; + bf->rates[i].flags = ratetbl->rate[i].flags; + if (tx_info->control.use_rts) + bf->rates[i].count = ratetbl->rate[i].count_rts; + else if (tx_info->control.use_cts_prot) + bf->rates[i].count = ratetbl->rate[i].count_cts; + else + bf->rates[i].count = ratetbl->rate[i].count; + } + + return true; +} + static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ath_buf *bf) { - ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates, - ARRAY_SIZE(bf->rates)); + struct ieee80211_tx_info *tx_info; + + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); + + if (!ath_merge_ratetbl(sta, bf, tx_info)) + ieee80211_get_tx_rates(vif, sta, bf->bf_mpdu, bf->rates, + ARRAY_SIZE(bf->rates)); } static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index cca3b086aa70..49f7ee1c912b 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -307,8 +307,7 @@ static void carl9170_zap_queues(struct ar9170 *ar) for (i = 0; i < ar->hw->queues; i++) ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD; - for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++) - ar->mem_bitmap[i] = 0; + bitmap_zero(ar->mem_bitmap, ar->fw.mem_blocks); rcu_read_lock(); list_for_each_entry_rcu(cvif, &ar->vif_list, list) { @@ -1968,9 +1967,7 @@ int carl9170_register(struct ar9170 *ar) if (WARN_ON(ar->mem_bitmap)) return -EINVAL; - ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG), - sizeof(unsigned long), - GFP_KERNEL); + ar->mem_bitmap = bitmap_zalloc(ar->fw.mem_blocks, GFP_KERNEL); if (!ar->mem_bitmap) return -ENOMEM; @@ -2085,7 +2082,7 @@ void carl9170_free(struct ar9170 *ar) kfree_skb(ar->rx_failover); ar->rx_failover = NULL; - kfree(ar->mem_bitmap); + bitmap_free(ar->mem_bitmap); ar->mem_bitmap = NULL; kfree(ar->survey); diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 88444fe6d1c6..1b76f4434c06 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -275,12 +275,12 @@ static void carl9170_tx_release(struct kref *ref) if (WARN_ON_ONCE(!ar)) return; - BUILD_BUG_ON( - offsetof(struct ieee80211_tx_info, status.ack_signal) != 20); - - memset(&txinfo->status.ack_signal, 0, - sizeof(struct ieee80211_tx_info) - - offsetof(struct ieee80211_tx_info, status.ack_signal)); + /* + * This does not call ieee80211_tx_info_clear_status() 
because + * carl9170_tx_fill_rateinfo() has filled the rate information + * before we get to this point. + */ + memset_after(&txinfo->status, 0, rates); if (atomic_read(&ar->tx_total_queued)) ar->tx_schedule = true; diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 8d5a16b558e6..774419c7f442 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -126,6 +126,7 @@ enum CountryCode { CTRY_KOREA_ROC = 410, CTRY_KOREA_ROC2 = 411, CTRY_KOREA_ROC3 = 412, + CTRY_KOREA_ROC4 = 413, CTRY_KUWAIT = 414, CTRY_LATVIA = 428, CTRY_LEBANON = 422, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index c4bd26e65949..cdb1e9a23734 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -76,6 +76,7 @@ enum EnumRd { APL7_FCCA = 0x5C, APL8_WORLD = 0x5D, APL9_WORLD = 0x5E, + APL10_WORLD = 0x5F, WOR0_WORLD = 0x60, WOR1_WORLD = 0x61, @@ -204,6 +205,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {APL6_WORLD, CTL_ETSI, CTL_ETSI}, {APL8_WORLD, CTL_ETSI, CTL_ETSI}, {APL9_WORLD, CTL_ETSI, CTL_ETSI}, + {APL10_WORLD, CTL_ETSI, CTL_ETSI}, {APL3_FCCA, CTL_FCC, CTL_FCC}, {APL7_FCCA, CTL_FCC, CTL_FCC}, @@ -426,6 +428,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_KOREA_ROC, APL9_WORLD, "KR"}, {CTRY_KOREA_ROC2, APL2_WORLD, "K2"}, {CTRY_KOREA_ROC3, APL9_WORLD, "K3"}, + {CTRY_KOREA_ROC4, APL10_WORLD, "K4"}, {CTRY_KUWAIT, ETSI3_WORLD, "KW"}, {CTRY_LATVIA, ETSI1_WORLD, "LV"}, {CTRY_LEBANON, NULL1_WORLD, "LB"}, diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index aff04ef66266..4e9e13941c8f 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -272,6 +272,21 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) return 0; } +static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) +{ + int reg_data = 0; + + wcn36xx_dxe_read_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + &reg_data); + + reg_data &= ~wcn_ch; + + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + (int)reg_data); +} + static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl, gfp_t gfp) @@ -834,6 +849,53 @@ unlock: return ret; } +static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch) +{ + unsigned long flags; + struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start; + struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb; + bool ret = true; + + spin_lock_irqsave(&ch->lock, flags); + + /* Loop through ring buffer looking for nonempty entries. */ + ctl_bd_start = ch->head_blk_ctl; + ctl_bd = ctl_bd_start; + ctl_skb_start = ctl_bd_start->next; + ctl_skb = ctl_skb_start; + do { + if (ctl_skb->skb) { + ret = false; + goto unlock; + } + ctl_bd = ctl_skb->next; + ctl_skb = ctl_bd->next; + } while (ctl_skb != ctl_skb_start); + +unlock: + spin_unlock_irqrestore(&ch->lock, flags); + return ret; +} + +int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn) +{ + int i = 0; + + /* Called with mac80211 queues stopped. Wait for empty HW queues. */ + do { + if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) && + _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) { + return 0; + } + /* This ieee80211_ops callback is specifically allowed to + * sleep.
+ */ + usleep_range(1000, 1100); + } while (++i < 100); + + return -EBUSY; +} + int wcn36xx_dxe_init(struct wcn36xx *wcn) { int reg_data = 0, ret; @@ -869,7 +931,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_WQ_TX_L); wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data); - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); /***************************************/ /* Init descriptors for TX HIGH channel */ @@ -893,9 +954,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); - /***************************************/ /* Init descriptors for RX LOW channel */ /***************************************/ @@ -905,7 +963,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) goto out_err_rxl_ch; } - /* For RX we need to preallocated buffers */ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch); @@ -928,9 +985,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_L, WCN36XX_DXE_CH_DEFAULT_CTL_RX_L); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); - /***************************************/ /* Init descriptors for RX HIGH channel */ /***************************************/ @@ -962,15 +1016,18 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_H, WCN36XX_DXE_CH_DEFAULT_CTL_RX_H); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); - ret = wcn36xx_dxe_request_irqs(wcn); if (ret < 0) goto out_err_irq; timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0); + /* Enable channel interrupts */ + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + return 0; out_err_irq: @@ -987,6 +1044,14 @@ out_err_txh_ch: void wcn36xx_dxe_deinit(struct wcn36xx *wcn) { + int reg_data = 0; + + /* Disable channel interrupts */ + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + free_irq(wcn->tx_irq, wcn); free_irq(wcn->rx_irq, wcn); del_timer(&wcn->tx_ack_timer); @@ -996,6 +1061,15 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn) wcn->tx_ack_skb = NULL; } + /* Put the DXE block into reset before freeing memory */ + reg_data = WCN36XX_DXE_REG_RESET; + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data); + wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch); wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch); + + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch); } diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h index 31b81b7547a3..26a31edf52e9 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.h +++ b/drivers/net/wireless/ath/wcn36xx/dxe.h @@ -466,5 +466,6 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, struct wcn36xx_tx_bd *bd, struct sk_buff *skb, bool is_low); +int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn); void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status); #endif /* _DXE_H_ */ diff --git
a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 9bea2b01f9aa..2a1db9756fd5 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -3459,9 +3459,6 @@ struct wcn36xx_hal_missed_beacon_ind_msg { /* Beacon Filtering data structures */ -/* The above structure would be followed by multiple of below mentioned - * structure - */ struct beacon_filter_ie { u8 element_id; u8 check_ie_presence; @@ -3469,7 +3466,27 @@ struct beacon_filter_ie { u8 value; u8 bitmask; u8 ref; -}; +} __packed; + +#define WCN36XX_FILTER_CAPABILITY_MASK 0x73cf +#define WCN36XX_FILTER_IE_DS_CHANNEL_MASK 0x00 +#define WCN36XX_FILTER_IE_ERP_FILTER_MASK 0xF8 +#define WCN36XX_FILTER_IE_EDCA_FILTER_MASK 0xF0 +#define WCN36XX_FILTER_IE_QOS_FILTER_MASK 0xF0 +#define WCN36XX_FILTER_IE_CHANNEL_SWITCH_MASK 0x00 +#define WCN36XX_FILTER_IE_HT_BYTE0_FILTER_MASK 0x00 +#define WCN36XX_FILTER_IE_HT_BYTE1_FILTER_MASK 0xF8 +#define WCN36XX_FILTER_IE_HT_BYTE2_FILTER_MASK 0xEB +#define WCN36XX_FILTER_IE_HT_BYTE5_FILTER_MASK 0xFD +#define WCN36XX_FILTER_IE_PWR_CONSTRAINT_MASK 0x00 +#define WCN36XX_FILTER_IE_OPMODE_NOTIF_MASK 0x00 +#define WCN36XX_FILTER_IE_VHTOP_CHWIDTH_MASK 0xFC +#define WCN36XX_FILTER_IE_RSN_MASK 0x00 +#define WCN36XX_FILTER_IE_VENDOR_MASK 0x00 + +/* The above structure would be followed by multiple of below mentioned + * structure + */ struct wcn36xx_hal_add_bcn_filter_req_msg { struct wcn36xx_hal_msg_header header; @@ -3480,14 +3497,14 @@ struct wcn36xx_hal_add_bcn_filter_req_msg { u16 ie_num; u8 bss_index; u8 reserved; -}; +} __packed; struct wcn36xx_hal_rem_bcn_filter_req { struct wcn36xx_hal_msg_header header; u8 ie_Count; u8 rem_ie_id[1]; -}; +} __packed; #define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD 0 #define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD 1 diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index b04533bbc3a4..9575d7373bf2 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -402,6 +402,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch) static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) { struct wcn36xx *wcn = hw->priv; + int ret; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); @@ -417,17 +418,31 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) * want to receive/transmit regular data packets, then * simply stop the scan session and exit PS mode. */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); - wcn->sw_scan_channel = 0; + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } } else if (wcn->sw_scan) { /* A scan is ongoing, do not change the operating * channel, but start a scan session on the channel. */ - wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (!wcn->sw_scan_init) { + /* This can fail if we are unable to notify the + * operating channel. 
+ */ + ret = wcn36xx_smd_init_scan(wcn, + HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + if (ret) { + mutex_unlock(&wcn->conf_mutex); + return -EIO; + } + } wcn36xx_smd_start_scan(wcn, ch); - wcn->sw_scan_channel = ch; } else { wcn36xx_change_opchannel(wcn, ch); } @@ -707,6 +722,8 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, struct wcn36xx *wcn = hw->priv; struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); + wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_start"); + wcn->sw_scan = true; wcn->sw_scan_vif = vif; wcn->sw_scan_channel = 0; @@ -721,8 +738,15 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, { struct wcn36xx *wcn = hw->priv; + wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_complete"); + /* ensure that any scan session is finished */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } wcn->sw_scan = false; wcn->sw_scan_opchannel = 0; } @@ -910,6 +934,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, * place where AID is available. */ wcn36xx_smd_config_sta(wcn, vif, sta); + if (vif->type == NL80211_IFTYPE_STATION) + wcn36xx_smd_add_beacon_filter(wcn, vif); wcn36xx_enable_keep_alive_null_packet(wcn, vif); } else { wcn36xx_dbg(WCN36XX_DBG_MAC, @@ -1196,7 +1222,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, u16 tid = params->tid; u16 *ssn = &params->ssn; int ret = 0; - u8 session; + int session; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", action, tid); @@ -1208,9 +1234,11 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, sta_priv->tid = tid; session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0, get_sta_index(vif, sta_priv)); + if (session < 0) { + ret = session; + goto out; + } wcn36xx_smd_add_ba(wcn, session); - wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid, - session); break; case IEEE80211_AMPDU_RX_STOP: wcn36xx_smd_del_ba(wcn, tid, 0, get_sta_index(vif, sta_priv)); @@ -1220,6 +1248,18 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START; spin_unlock_bh(&sta_priv->ampdu_lock); + /* Replace the mac80211 ssn with the firmware one */ + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu ssn = %u\n", *ssn); + wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid, ssn); + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu fw-ssn = %u\n", *ssn); + + /* Start BA session */ + session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1, + get_sta_index(vif, sta_priv)); + if (session < 0) { + ret = session; + goto out; + } ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -1227,8 +1267,6 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_OPERATIONAL; spin_unlock_bh(&sta_priv->ampdu_lock); - wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1, - get_sta_index(vif, sta_priv)); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: @@ -1244,6 +1282,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_err("Unknown AMPDU action\n"); } +out: mutex_unlock(&wcn->conf_mutex); return ret; @@ -1277,6 +1316,16 @@ static void wcn36xx_ipv6_addr_change(struct ieee80211_hw *hw, } #endif +static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct wcn36xx *wcn = hw->priv; + + if
(wcn36xx_dxe_tx_flush(wcn)) { + wcn36xx_err("Failed to flush hardware tx queues\n"); + } +} + static const struct ieee80211_ops wcn36xx_ops = { .start = wcn36xx_start, .stop = wcn36xx_stop, @@ -1304,6 +1353,7 @@ static const struct ieee80211_ops wcn36xx_ops = { #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = wcn36xx_ipv6_addr_change, #endif + .flush = wcn36xx_flush, CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd) }; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index ed45e2cf039b..caeb68901326 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -722,6 +722,7 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode, wcn36xx_err("hal_init_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = true; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -752,6 +753,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_start_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = scan_channel; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -782,6 +784,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_end_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = 0; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -823,6 +826,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, wcn36xx_err("hal_finish_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = false; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -940,7 +944,7 @@ int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_re INIT_HAL_MSG((*msg_body), WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ); - msg_body->num_channel = min_t(u8, req->n_channels, sizeof(msg_body->channels)); + msg_body->num_channel = min_t(u8, req->n_channels, ARRAY_SIZE(msg_body->channels)); for (i = 0; i < msg_body->num_channel; i++) { struct wcn36xx_hal_channel_param *param = &msg_body->channels[i]; u32 min_power = WCN36XX_HAL_DEFAULT_MIN_POWER; @@ -2557,6 +2561,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn, &session_id); if (ret) { wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret); + ret = -EINVAL; goto out; } @@ -2622,27 +2627,43 @@ out: return ret; } -static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len) +static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info) { + struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate; struct wcn36xx_hal_trigger_ba_rsp_msg *rsp; + int i; if (len < sizeof(*rsp)) return -EINVAL; rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf; + + if (rsp->candidate_cnt < 1) + return rsp->status ? 
rsp->status : -EINVAL; + + candidate = (struct wcn36xx_hal_trigger_ba_rsp_candidate *)(buf + sizeof(*rsp)); + + for (i = 0; i < STACFG_MAX_TC; i++) { + ba_info[i] = candidate->ba_info[i]; + } + return rsp->status; } -int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id) +int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn) { struct wcn36xx_hal_trigger_ba_req_msg msg_body; struct wcn36xx_hal_trigger_ba_req_candidate *candidate; + struct add_ba_info ba_info[STACFG_MAX_TC]; int ret; + if (tid >= STACFG_MAX_TC) + return -EINVAL; + mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ); - msg_body.session_id = session_id; + msg_body.session_id = 0; /* not really used */ msg_body.candidate_cnt = 1; msg_body.header.len += sizeof(*candidate); PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -2657,13 +2678,17 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 sessio wcn36xx_err("Sending hal_trigger_ba failed\n"); goto out; } - ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len); + ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len, ba_info); if (ret) { wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret); goto out; } out: mutex_unlock(&wcn->hal_mutex); + + if (ssn) + *ssn = ba_info[tid].starting_seq_num; + return ret; } @@ -2732,7 +2757,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", tmp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); } return 0; } @@ -2747,7 +2772,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", rsp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); return 0; } } @@ -3168,6 +3193,91 @@ out: return ret; } +#define BEACON_FILTER(eid, presence, offs, val, mask, ref_val) \ + { \ + .element_id = eid, \ + .check_ie_presence = presence, \ + .offset = offs, \ + .value = val, \ + .bitmask = mask, \ + .ref = ref_val, \ + } + +static const struct beacon_filter_ie bcn_filter_ies[] = { + BEACON_FILTER(WLAN_EID_DS_PARAMS, 0, 0, 0, + WCN36XX_FILTER_IE_DS_CHANNEL_MASK, 0), + BEACON_FILTER(WLAN_EID_ERP_INFO, 0, 0, 0, + WCN36XX_FILTER_IE_ERP_FILTER_MASK, 0), + BEACON_FILTER(WLAN_EID_EDCA_PARAM_SET, 0, 0, 0, + WCN36XX_FILTER_IE_EDCA_FILTER_MASK, 0), + BEACON_FILTER(WLAN_EID_QOS_CAPA, 0, 0, 0, + WCN36XX_FILTER_IE_QOS_FILTER_MASK, 0), + BEACON_FILTER(WLAN_EID_CHANNEL_SWITCH, 1, 0, 0, + WCN36XX_FILTER_IE_CHANNEL_SWITCH_MASK, 0), + BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 0, 0, + WCN36XX_FILTER_IE_HT_BYTE0_FILTER_MASK, 0), + BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 2, 0, + WCN36XX_FILTER_IE_HT_BYTE2_FILTER_MASK, 0), + BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 5, 0, + WCN36XX_FILTER_IE_HT_BYTE5_FILTER_MASK, 0), + BEACON_FILTER(WLAN_EID_PWR_CONSTRAINT, 0, 0, 0, + WCN36XX_FILTER_IE_PWR_CONSTRAINT_MASK, 0), + BEACON_FILTER(WLAN_EID_OPMODE_NOTIF, 0, 0, 0, + WCN36XX_FILTER_IE_OPMODE_NOTIF_MASK, 0), + BEACON_FILTER(WLAN_EID_VHT_OPERATION, 0, 0, 0, + WCN36XX_FILTER_IE_VHTOP_CHWIDTH_MASK, 0), + BEACON_FILTER(WLAN_EID_RSN, 1, 0, 0, + WCN36XX_FILTER_IE_RSN_MASK, 0), + BEACON_FILTER(WLAN_EID_VENDOR_SPECIFIC, 1, 0, 0, + WCN36XX_FILTER_IE_VENDOR_MASK, 0), +}; + +int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn, + struct ieee80211_vif *vif) +{ + struct wcn36xx_hal_add_bcn_filter_req_msg msg_body, *body; + 
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); + u8 *payload; + size_t payload_size; + int ret; + + if (!get_feat_caps(wcn->fw_feat_caps, BCN_FILTER)) + return -EOPNOTSUPP; + + mutex_lock(&wcn->hal_mutex); + INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BCN_FILTER_REQ); + + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + + body = (struct wcn36xx_hal_add_bcn_filter_req_msg *)wcn->hal_buf; + body->capability_info = vif->bss_conf.assoc_capability; + body->capability_mask = WCN36XX_FILTER_CAPABILITY_MASK; + body->beacon_interval = vif->bss_conf.beacon_int; + body->ie_num = ARRAY_SIZE(bcn_filter_ies); + body->bss_index = vif_priv->bss_index; + + payload = ((u8 *)body) + body->header.len; + payload_size = sizeof(bcn_filter_ies); + memcpy(payload, &bcn_filter_ies, payload_size); + + body->header.len += payload_size; + + ret = wcn36xx_smd_send_and_wait(wcn, body->header.len); + if (ret) { + wcn36xx_err("Sending add bcn_filter failed\n"); + goto out; + } + + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("add bcn filter response failed err=%d\n", ret); + goto out; + } +out: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, void *buf, int len, void *priv, u32 addr) { @@ -3223,6 +3333,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, case WCN36XX_HAL_ENTER_IMPS_RSP: case WCN36XX_HAL_EXIT_IMPS_RSP: case WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP: + case WCN36XX_HAL_ADD_BCN_FILTER_RSP: memcpy(wcn->hal_buf, buf, len); wcn->hal_rsp_len = len; complete(&wcn->hal_rsp_compl); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 88e045dad8f3..957cfa87fbde 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -137,7 +137,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id); int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index); -int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id); +int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); @@ -167,4 +167,7 @@ int wcn36xx_smd_host_resume(struct wcn36xx *wcn); int wcn36xx_smd_enter_imps(struct wcn36xx *wcn); int wcn36xx_smd_exit_imps(struct wcn36xx *wcn); +int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn, + struct ieee80211_vif *vif); + #endif /* _SMD_H_ */ diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 75951ccbc840..dd58dde8c836 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -272,7 +272,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) const struct wcn36xx_rate *rate; struct ieee80211_hdr *hdr; struct wcn36xx_rx_bd *bd; - struct ieee80211_supported_band *sband; u16 fc, sn; /* @@ -314,8 +313,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) fc = __le16_to_cpu(hdr->frame_control); sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); - status.freq = WCN36XX_CENTER_FREQ(wcn); - status.band = WCN36XX_BAND(wcn); status.mactime = 10; status.signal = -get_rssi0(bd); status.antenna = 1; @@ -327,18 +324,36 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); + if (bd->scan_learn) { + /* If packet originates from hardware 
scanning, extract the + * band/channel from bd descriptor. + */ + u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; + + if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { + status.band = NL80211_BAND_5GHZ; + status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], + status.band); + } else { + status.band = NL80211_BAND_2GHZ; + status.freq = ieee80211_channel_to_frequency(hwch, status.band); + } + } else { + status.band = WCN36XX_BAND(wcn); + status.freq = WCN36XX_CENTER_FREQ(wcn); + } + if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) { rate = &wcn36xx_rate_table[bd->rate_id]; status.encoding = rate->encoding; status.enc_flags = rate->encoding_flags; status.bw = rate->bw; status.rate_idx = rate->mcs_or_legacy_index; - sband = wcn->hw->wiphy->bands[status.band]; status.nss = 1; if (status.band == NL80211_BAND_5GHZ && status.encoding == RX_ENC_LEGACY && - status.rate_idx >= sband->n_bitrates) { + status.rate_idx >= 4) { /* no dsss rates in 5Ghz rates table */ status.rate_idx -= 4; } @@ -353,22 +368,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) ieee80211_is_probe_resp(hdr->frame_control)) status.boottime_ns = ktime_get_boottime_ns(); - if (bd->scan_learn) { - /* If packet originates from hardware scanning, extract the - * band/channel from bd descriptor. - */ - u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; - - if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { - status.band = NL80211_BAND_5GHZ; - status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], - status.band); - } else { - status.band = NL80211_BAND_2GHZ; - status.freq = ieee80211_channel_to_frequency(hwch, status.band); - } - } - memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); if (ieee80211_is_beacon(hdr->frame_control)) { diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 1c8d918137da..fbd0558c2c19 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -248,6 +248,7 @@ struct wcn36xx { struct cfg80211_scan_request *scan_req; bool sw_scan; u8 sw_scan_opchannel; + bool sw_scan_init; u8 sw_scan_channel; struct ieee80211_vif *sw_scan_vif; struct mutex scan_lock; diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig index 5bf2318763c5..3a1a35b5672f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/Kconfig +++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig @@ -7,16 +7,20 @@ config BRCMSMAC depends on MAC80211 depends on BCMA_POSSIBLE select BCMA - select NEW_LEDS if BCMA_DRIVER_GPIO - select LEDS_CLASS if BCMA_DRIVER_GPIO select BRCMUTIL select FW_LOADER select CORDIC help This module adds support for PCIe wireless adapters based on Broadcom - IEEE802.11n SoftMAC chipsets. It also has WLAN led support, which will - be available if you select BCMA_DRIVER_GPIO. If you choose to build a - module, the driver will be called brcmsmac.ko. + IEEE802.11n SoftMAC chipsets. If you choose to build a module, the + driver will be called brcmsmac.ko. + +config BRCMSMAC_LEDS + def_bool BRCMSMAC && BCMA_DRIVER_GPIO && MAC80211_LEDS + help + The brcmsmac LED support depends on the presence of the + BCMA_DRIVER_GPIO driver, and it only works if LED support + is enabled and reachable from the driver module. 
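The new CONFIG_BRCMSMAC_LEDS symbol leans on the usual Kconfig stub pattern (visible in the led.h hunk further down): when the option is off, the header supplies inline no-ops so callers never need #ifdefs of their own. A minimal sketch of that pattern, with illustrative stub bodies rather than the exact led.h contents:

#ifdef CONFIG_BRCMSMAC_LEDS
int brcms_led_register(struct brcms_info *wl);
void brcms_led_unregister(struct brcms_info *wl);
#else
/* LED support compiled out: report success and do nothing */
static inline int brcms_led_register(struct brcms_info *wl)
{
	return 0;
}
static inline void brcms_led_unregister(struct brcms_info *wl)
{
}
#endif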
source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig" diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index fb727778312c..ba52318615ae 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3901,6 +3901,24 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, cfg->wowl.active = true; } +static int brcmf_keepalive_start(struct brcmf_if *ifp, unsigned int interval) +{ + struct brcmf_mkeep_alive_pkt_le kalive = {0}; + int ret = 0; + + /* Configure Null function/data keepalive */ + kalive.version = cpu_to_le16(1); + kalive.period_msec = cpu_to_le32(interval * MSEC_PER_SEC); + kalive.len_bytes = cpu_to_le16(0); + kalive.keep_alive_id = 0; + + ret = brcmf_fil_iovar_data_set(ifp, "mkeep_alive", &kalive, sizeof(kalive)); + if (ret) + brcmf_err("keep-alive packet config failed, ret=%d\n", ret); + + return ret; +} + static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowl) { @@ -3947,6 +3965,9 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, } else { /* Configure WOWL paramaters */ brcmf_configure_wowl(cfg, ifp, wowl); + + /* Prevent disassociation due to inactivity with keep-alive */ + brcmf_keepalive_start(ifp, 30); } exit: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index ff2ef557f0ea..e69d1e56996f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -1052,4 +1052,23 @@ struct brcmf_gscan_config { struct brcmf_gscan_bucket_config bucket[1]; }; +/** + * struct brcmf_mkeep_alive_pkt_le - configuration data for keep-alive frame. + * + * @version: version for mkeep_alive + * @length: length of fixed parameters in the structure. + * @period_msec: keep-alive period in milliseconds. + * @len_bytes: size of the data. + * @keep_alive_id: ID (0 - 3). + * @data: keep-alive frame data. 
+ */ +struct brcmf_mkeep_alive_pkt_le { + __le16 version; + __le16 length; + __le32 period_msec; + __le16 len_bytes; + u8 keep_alive_id; + u8 data[0]; +} __packed; + #endif /* FWIL_TYPES_H_ */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile index 482d7737764d..090757730ba6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile @@ -42,6 +42,6 @@ brcmsmac-y := \ brcms_trace_events.o \ debug.o -brcmsmac-$(CONFIG_BCMA_DRIVER_GPIO) += led.o +brcmsmac-$(CONFIG_BRCMSMAC_LEDS) += led.o obj-$(CONFIG_BRCMSMAC) += brcmsmac.o diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h index d65f5c268fd7..2a5cbeb9e783 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h @@ -24,7 +24,7 @@ struct brcms_led { struct gpio_desc *gpiod; }; -#ifdef CONFIG_BCMA_DRIVER_GPIO +#ifdef CONFIG_BRCMSMAC_LEDS void brcms_led_unregister(struct brcms_info *wl); int brcms_led_register(struct brcms_info *wl); #else diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 23037bfc9e4c..5727c7c00a28 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -2303,7 +2303,7 @@ static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) ssid); } -static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) +static int ipw_send_adapter_address(struct ipw_priv *priv, const u8 * mac) { if (!priv || !mac) { IPW_ERROR("Invalid args\n"); diff --git a/drivers/net/wireless/intel/iwlegacy/Kconfig b/drivers/net/wireless/intel/iwlegacy/Kconfig index 24fe3f63c321..7eacc8e58ee1 100644 --- a/drivers/net/wireless/intel/iwlegacy/Kconfig +++ b/drivers/net/wireless/intel/iwlegacy/Kconfig @@ -2,14 +2,13 @@ config IWLEGACY tristate select FW_LOADER - select NEW_LEDS - select LEDS_CLASS select LEDS_TRIGGERS select MAC80211_LEDS config IWL4965 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" depends on PCI && MAC80211 + depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211 select IWLEGACY help This option enables support for @@ -38,6 +37,7 @@ config IWL4965 config IWL3945 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" depends on PCI && MAC80211 + depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211 select IWLEGACY help Select to build the driver supporting the: diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 1085afbefba8..c21c0c68849a 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -2,6 +2,7 @@ config IWLWIFI tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " depends on PCI && HAS_IOMEM && CFG80211 + depends on IWLMEI || !IWLMEI select FW_LOADER help Select to build the driver supporting the: @@ -47,7 +48,7 @@ if IWLWIFI config IWLWIFI_LEDS bool - depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI + depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211 depends on IWLMVM || IWLDVM select LEDS_TRIGGERS select MAC80211_LEDS @@ -146,3 +147,28 @@ config IWLWIFI_DEVICE_TRACING endmenu endif + +config IWLMEI + tristate "Intel Management Engine communication over WLAN" + depends on INTEL_MEI + depends on PM + help + Enables the iwlmei kernel module. 
+ + CSME stands for Converged Security and Management Engine. It is a CPU + on the chipset and runs a dedicated firmware. AMT (Active Management + Technology) is one of the applications that run on that CPU. AMT + allows controlling the platform remotely. + + This kernel module allows communicating with the Intel Management + Engine over Wi-Fi. This is supported starting from Tiger Lake + platforms and has been tested on 9260 devices only. + If AMT is configured not to use the wireless device, this module is + harmless (and useless). + Enabling this option on a platform that has a different device and + has Wireless enabled on AMT can prevent Wi-Fi from working correctly. + + For more information see + <https://software.intel.com/en-us/manageability/> + + If unsure, say N. diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 0d4656efe908..75a703eb1bdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -30,5 +30,6 @@ ccflags-y += -I$(src) obj-$(CONFIG_IWLDVM) += dvm/ obj-$(CONFIG_IWLMVM) += mvm/ +obj-$(CONFIG_IWLMEI) += mei/ CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 1572097bccf1..330ef04ca51a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -9,7 +9,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 67 +#define IWL_22000_UCODE_API_MAX 69 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -54,7 +54,13 @@ #define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-" #define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-" #define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-" -#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm7-a0-" +#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-" +#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-" +#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-" +#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-" +#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-" +#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-" +#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-" #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ @@ -113,6 +119,18 @@ IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode" #define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \ IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \ + IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \ + IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \ + IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \ + IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" +#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -234,7 +252,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .dccm2_len = IWL_22000_DCCM2_LEN, \ .smem_offset = IWL_22000_SMEM_OFFSET, \ .smem_len = IWL_22000_SMEM_LEN, \ - .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \ .apmg_not_supported = true, \ .trans.mq_rx_supported = 
true, \ .vht_mu_mimo_supported = true, \ @@ -626,7 +644,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = { }; const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -639,7 +657,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { }; const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -652,7 +670,7 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { }; const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -665,7 +683,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { }; const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { - .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -696,13 +714,6 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { .num_rbds = IWL_NUM_RBDS_NON_HE, }; -const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = { - .name = "Intel(R) Wi-Fi 6 AX210 160MHz", - .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { .name = iwl_ax211_name, .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, @@ -879,6 +890,47 @@ const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = { .num_rbds = IWL_NUM_RBDS_AX210_HE, }; +const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = { + .fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = { + .fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = { + .fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = { + .fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = { + .fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = { + .fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_BZ, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); @@ -907,3 +959,8 @@ MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 
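/* Illustration only, not part of the patch: each *_MODULE_FIRMWARE() helper pastes its firmware prefix together with the stringified API version, so IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX) expands to "iwlwifi-BzBnj-a0-fm-a0-" "69" ".ucode", i.e. the single file name "iwlwifi-BzBnj-a0-fm-a0-69.ucode". */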
+MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index fbd57a2b2bd5..90b9becd1673 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1974,12 +1974,8 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode) /* SKU Control */ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | - CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP, - (CSR_HW_REV_STEP(priv->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) | - (CSR_HW_REV_DASH(priv->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_DASH)); + CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH, + CSR_HW_REV_STEP_DASH(priv->trans->hw_rev)); /* write radio config values to register */ if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index bf431fa4fe81..790c96df58cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -242,17 +242,16 @@ found: IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, - __le32 *block_list_array, - int *block_list_size) + struct iwl_tas_config_cmd_v3 *cmd) { union acpi_object *wifi_pkg, *data; - int ret, tbl_rev, i; - bool enabled; + int ret, tbl_rev, i, block_list_size, enabled; data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* try to read wtas table revision 1 or revision 0*/ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WTAS_WIFI_DATA_SIZE, &tbl_rev); @@ -261,40 +260,54 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, goto out_free; } - if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || - tbl_rev != 0) { + if (tbl_rev == 1 && wifi_pkg->package.elements[1].type == + ACPI_TYPE_INTEGER) { + u32 tas_selection = + (u32)wifi_pkg->package.elements[1].integer.value; + u16 override_iec = + (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS; + u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >> + ACPI_WTAS_ENABLE_IEC_POS; + + enabled = tas_selection & ACPI_WTAS_ENABLED_MSK; + cmd->override_tas_iec = cpu_to_le16(override_iec); + cmd->enable_tas_iec = cpu_to_le16(enabled_iec); + + } else if (tbl_rev == 0 && + wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) { + enabled = !!wifi_pkg->package.elements[1].integer.value; + } else { ret = -EINVAL; goto out_free; } - enabled = !!wifi_pkg->package.elements[1].integer.value; - if (!enabled) { - *block_list_size = -1; IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n"); ret = 0; goto out_free; } + IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev); if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].integer.value > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n", - wifi_pkg->package.elements[1].integer.value); + wifi_pkg->package.elements[2].integer.value); ret = -EINVAL; goto out_free; } - *block_list_size = wifi_pkg->package.elements[2].integer.value; + 
block_list_size = wifi_pkg->package.elements[2].integer.value; + cmd->block_list_size = cpu_to_le32(block_list_size); - IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size); - if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) { + IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size); + if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n", - *block_list_size); + block_list_size); ret = -EINVAL; goto out_free; } - for (i = 0; i < *block_list_size; i++) { + for (i = 0; i < block_list_size; i++) { u32 country; if (wifi_pkg->package.elements[3 + i].type != @@ -306,11 +319,11 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, } country = wifi_pkg->package.elements[3 + i].integer.value; - block_list_array[i] = cpu_to_le32(country); + cmd->block_list_array[i] = cpu_to_le32(country); IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country); } - ret = 0; + ret = 1; out_free: kfree(data); return ret; @@ -789,7 +802,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) * looking up in ACPI */ if (wifi_pkg->package.count != - min_size + profile_size * num_profiles) { + hdr_size + profile_size * num_profiles) { ret = -EINVAL; goto out_free; } @@ -852,6 +865,8 @@ read_table: } } + fwrt->geo_num_profiles = num_profiles; + fwrt->geo_enabled = true; ret = 0; out_free: kfree(data); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 4aaa8a6b071b..22b3c665f91a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -65,10 +65,19 @@ #define ACPI_ECKV_WIFI_DATA_SIZE 2 /* - * 1 type, 1 enabled, 1 block list size, 16 block list array + * TAS size: 1 element for type, + * 1 element for enabled field, + * 1 element for block list size, + * 16 elements for block list array */ #define APCI_WTAS_BLACK_LIST_MAX 16 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX) +#define ACPI_WTAS_ENABLED_MSK 0x1 +#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2 +#define ACPI_WTAS_ENABLE_IEC_MSK 0x4 +#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1 +#define ACPI_WTAS_ENABLE_IEC_POS 0x2 + #define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V1) + 2) @@ -105,6 +114,11 @@ struct iwl_geo_profile { struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2]; }; +/* Same thing as with SAR, all revisions fit in revision 2 */ +struct iwl_ppag_chain { + s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; +}; + enum iwl_dsm_funcs_rev_0 { DSM_FUNC_QUERY = 0, DSM_FUNC_DISABLE_SRD = 1, @@ -198,8 +212,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands, u32 n_profiles); -int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array, - int *block_list_size); +int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, + struct iwl_tas_config_cmd_v3 *cmd); __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); @@ -280,8 +294,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) } static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, - __le32 *block_list_array, - int *block_list_size) + struct iwl_tas_config_cmd_v3 *cmd) { return -ENOENT; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index c840a97e6a62..e00ab21e7358 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: 
GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -97,6 +97,21 @@ struct iwl_alive_ntf_v5 { struct iwl_sku_id sku_id; } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */ +struct iwl_imr_alive_info { + __le64 base_addr; + __le32 size; + __le32 enabled; +} __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */ + +struct iwl_alive_ntf_v6 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; + struct iwl_sku_id sku_id; + struct iwl_imr_alive_info imr; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */ + /** * enum iwl_extended_cfg_flag - commands driver may send before * finishing init flow @@ -143,15 +158,6 @@ enum iwl_card_state_flags { }; /** - * struct iwl_radio_version_notif - information on the card state - * ( CARD_STATE_NOTIFICATION = 0xa1 ) - * @flags: &enum iwl_card_state_flags - */ -struct iwl_card_state_notif { - __le32 flags; -} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ - -/** * enum iwl_error_recovery_flags - flags for error recovery cmd * @ERROR_RECOVERY_UPDATE_DB: update db from blob sent * @ERROR_RECOVERY_END_OF_RECOVERY: end of recovery diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index ee6b5844a871..0703e41403a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -51,7 +51,7 @@ enum iwl_legacy_cmds { * @UCODE_ALIVE_NTFY: * Alive data from the firmware, as described in * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or - * &struct iwl_alive_ntf_v5. + * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6. */ UCODE_ALIVE_NTFY = 0x1, @@ -72,7 +72,8 @@ enum iwl_legacy_cmds { /** * @PHY_CONTEXT_CMD: - * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. + * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd + * or &struct iwl_phy_context_cmd_v1. 
*/ PHY_CONTEXT_CMD = 0x8, @@ -90,7 +91,8 @@ /** * @SCAN_CFG_CMD: - * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config + * uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2 + * or &struct iwl_scan_config */ SCAN_CFG_CMD = 0xc, @@ -356,7 +358,7 @@ enum iwl_legacy_cmds { * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics, - * &struct iwl_statistics_operational_ntfy + * &struct iwl_statistics_operational_ntfy_ver_14 */ STATISTICS_CMD = 0x9c, @@ -365,6 +367,7 @@ * one of &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistics, + * &struct iwl_statistics_operational_ntfy_ver_14 * &struct iwl_statistics_operational_ntfy */ STATISTICS_NOTIFICATION = 0x9d, @@ -383,13 +386,6 @@ REDUCE_TX_POWER_CMD = 0x9f, /** - * @CARD_STATE_NOTIFICATION: - * Card state (RF/CT kill) notification, - * uses &struct iwl_card_state_notif - */ - CARD_STATE_NOTIFICATION = 0xa1, - - /** * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif */ MISSED_BEACONS_NOTIFICATION = 0xa2, @@ -612,6 +608,11 @@ enum iwl_system_subcmd_ids { * @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd */ RFI_GET_FREQ_TABLE_CMD = 0xc, + + /** + * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd + */ + SYSTEM_FEATURES_CONTROL_CMD = 0xd, }; #endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 1503119ea910..4cd9ab23954e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -554,7 +554,7 @@ struct iwl_wowlan_gtk_status_v1 { } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ /** - * struct iwl_wowlan_gtk_status - GTK status + * struct iwl_wowlan_gtk_status_v2 - GTK status * @key: GTK material * @key_len: GTK length, if set to 0, the key is not available * @key_flags: information about the key: @@ -565,7 +565,7 @@ struct iwl_wowlan_gtk_status_v1 { * @tkip_mic_key: TKIP RX MIC key * @rsc: TSC RSC counters */ -struct iwl_wowlan_gtk_status { +struct iwl_wowlan_gtk_status_v2 { u8 key[WOWLAN_KEY_MAX_SIZE]; u8 key_len; u8 key_flags; @@ -574,6 +574,41 @@ struct iwl_wowlan_gtk_status_v1 { struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */ +/** + * struct iwl_wowlan_all_rsc_tsc_v5 - key counters + * @ucast_rsc: unicast RSC values + * @mcast_rsc: multicast RSC values (per key map value) + * @sta_id: station ID + * @mcast_key_id_map: map of key id to @mcast_rsc entry + */ +struct iwl_wowlan_all_rsc_tsc_v5 { + __le64 ucast_rsc[IWL_MAX_TID_COUNT]; + __le64 mcast_rsc[2][IWL_MAX_TID_COUNT]; + __le32 sta_id; + u8 mcast_key_id_map[4]; +} __packed; /* ALL_TSC_RSC_API_S_VER_5 */ + +/** + * struct iwl_wowlan_gtk_status_v3 - GTK status + * @key: GTK material + * @key_len: GTK length, if set to 0, the key is not available + * @key_flags: information about the key: + * bits[0:1]: key index assigned by the AP + * bits[2:6]: GTK index of the key in the internal DB + * bit[7]: Set iff this is the currently used GTK + * @reserved: padding + * @tkip_mic_key: TKIP RX MIC key + * @sc: RSC/TSC counters + */ +struct iwl_wowlan_gtk_status_v3 { + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 key_len; + u8 key_flags; + u8 reserved[2]; + u8 tkip_mic_key[IWL_MIC_KEY_SIZE]; + struct iwl_wowlan_all_rsc_tsc_v5 sc; +} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */ + #define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) 
| BIT(1)) /** @@ -640,7 +675,7 @@ struct iwl_wowlan_status_v6 { * @wake_packet: wakeup packet */ struct iwl_wowlan_status_v7 { - struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; @@ -676,7 +711,7 @@ struct iwl_wowlan_status_v7 { * @wake_packet: wakeup packet */ struct iwl_wowlan_status_v9 { - struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; @@ -693,6 +728,44 @@ struct iwl_wowlan_status_v9 { u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */ +/** + * struct iwl_wowlan_status_v12 - WoWLAN status + * @gtk: GTK data + * @igtk: IGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched pattern + * @non_qos_seq_ctr: non-QoS sequence counter to use next. + * Reserved if the struct has version >= 10. + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @wake_packet_length: wakeup packet length + * @wake_packet_bufsize: wakeup packet buffer size + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @reserved: unused + * @wake_packet: wakeup packet + */ +struct iwl_wowlan_status_v12 { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 tid_tear_down; + u8 reserved[3]; + u8 wake_packet[]; /* can be truncated from _length to _bufsize */ +} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */ + /* TODO: NetDetect API */ #endif /* __iwl_fw_api_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 985b0dc5b52a..89236f42c5a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -32,6 +32,11 @@ enum iwl_data_path_subcmd_ids { STA_HE_CTXT_CMD = 0x7, /** + * @RLC_CONFIG_CMD: &struct iwl_rlc_config_cmd + */ + RLC_CONFIG_CMD = 0x8, + + /** * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config */ RFH_QUEUE_CONFIG_CMD = 0xD, @@ -195,4 +200,61 @@ struct iwl_thermal_dual_chain_request { __le32 event; } __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */ +enum iwl_rlc_chain_info { + IWL_RLC_CHAIN_INFO_DRIVER_FORCE = BIT(0), + IWL_RLC_CHAIN_INFO_VALID = 0x000e, + IWL_RLC_CHAIN_INFO_FORCE = 0x0070, + IWL_RLC_CHAIN_INFO_FORCE_MIMO = 0x0380, + IWL_RLC_CHAIN_INFO_COUNT = 0x0c00, + IWL_RLC_CHAIN_INFO_MIMO_COUNT = 0x3000, +}; + +/** + * struct iwl_rlc_properties - RLC properties + * @rx_chain_info: RX chain info, &enum iwl_rlc_chain_info + * @reserved: reserved + */ +struct iwl_rlc_properties { + __le32 rx_chain_info; + __le32 reserved; +} __packed; /* RLC_PROPERTIES_S_VER_1 */ + +enum iwl_sad_mode { + IWL_SAD_MODE_ENABLED = BIT(0), 
+ IWL_SAD_MODE_DEFAULT_ANT_MSK = 0x6, + IWL_SAD_MODE_DEFAULT_ANT_FW = 0x0, + IWL_SAD_MODE_DEFAULT_ANT_A = 0x2, + IWL_SAD_MODE_DEFAULT_ANT_B = 0x4, +}; + +/** + * struct iwl_sad_properties - SAD properties + * @chain_a_sad_mode: chain A SAD mode, &enum iwl_sad_mode + * @chain_b_sad_mode: chain B SAD mode, &enum iwl_sad_mode + * @mac_id: MAC index + * @reserved: reserved + */ +struct iwl_sad_properties { + __le32 chain_a_sad_mode; + __le32 chain_b_sad_mode; + __le32 mac_id; + __le32 reserved; +} __packed; + +/** + * struct iwl_rlc_config_cmd - RLC configuration + * @phy_id: PHY index + * @rlc: RLC properties, &struct iwl_rlc_properties + * @sad: SAD (single antenna diversity) options, &struct iwl_sad_properties + * @flags: flags, &enum iwl_rlc_flags + * @reserved: reserved + */ +struct iwl_rlc_config_cmd { + __le32 phy_id; + struct iwl_rlc_properties rlc; + struct iwl_sad_properties sad; + u8 flags; + u8 reserved[3]; +} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */ + #endif /* __iwl_fw_api_datapath_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 3988f5fea33a..456b7eaac570 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -7,7 +7,6 @@ #include <linux/bitops.h> -#define IWL_FW_INI_HW_SMEM_REGION_ID 15 #define IWL_FW_INI_MAX_REGION_ID 64 #define IWL_FW_INI_MAX_NAME 32 #define IWL_FW_INI_MAX_CFG_NAME 64 @@ -124,6 +123,9 @@ struct iwl_fw_ini_region_internal_buffer { * @hdr: debug header * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID * @type: region type. One of &enum iwl_fw_ini_region_type + * @sub_type: region sub type + * @sub_type_ver: region sub type version + * @reserved: not in use * @name: region name * @dev_addr: device address configuration. 
Used by * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC, @@ -146,7 +148,10 @@ struct iwl_fw_ini_region_internal_buffer { struct iwl_fw_ini_region_tlv { struct iwl_fw_ini_header hdr; __le32 id; - __le32 type; + u8 type; + u8 sub_type; + u8 sub_type_ver; + u8 reserved; u8 name[IWL_FW_INI_MAX_NAME]; union { struct iwl_fw_ini_region_dev_addr dev_addr; @@ -306,6 +311,7 @@ enum iwl_fw_ini_config_set_type { * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration + * @IWL_FW_INI_ALLOCATION_ID_DBGC4: allocation meant for DBGC4 configuration * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { @@ -313,6 +319,7 @@ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_ID_DBGC1, IWL_FW_INI_ALLOCATION_ID_DBGC2, IWL_FW_INI_ALLOCATION_ID_DBGC3, + IWL_FW_INI_ALLOCATION_ID_DBGC4, IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ @@ -379,6 +386,17 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ +enum iwl_fw_ini_region_device_memory_subtype { + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20, +}; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */ + /** * enum iwl_fw_ini_time_point * @@ -465,4 +483,17 @@ enum iwl_fw_ini_trigger_apply_policy { IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9), IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10), }; + +/** + * enum iwl_fw_ini_trigger_reset_fw_policy - Determines how to handle reset + * + * @IWL_FW_INI_RESET_FW_MODE_NOTHING: do not stop FW and reload (default) + * @IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY: stop FW without reload FW + * @IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW: stop FW with reload FW + */ +enum iwl_fw_ini_trigger_reset_fw_policy { + IWL_FW_INI_RESET_FW_MODE_NOTHING = 0, + IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY, + IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW +}; #endif diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 3551a3f1c1aa..4949fcf85257 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -34,6 +34,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids { TAS_CONFIG = 0x3, /** + * @SAR_OFFSET_MAPPING_TABLE_CMD: &iwl_sar_offset_mapping_cmd + */ + SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, + + /** * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy */ PNVM_INIT_COMPLETE_NTFY = 0xFE, @@ -388,18 +393,33 @@ enum iwl_mcc_source { MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, }; -#define IWL_TAS_BLACK_LIST_MAX 16 +#define IWL_TAS_BLOCK_LIST_MAX 16 /** - * struct iwl_tas_config_cmd - configures the TAS + * struct iwl_tas_config_cmd_v2 - configures the TAS * @block_list_size: size of relevant field in block_list_array - * @block_list_array: block list countries (without TAS) + * @block_list_array: list of countries where TAS must be disabled */ -struct iwl_tas_config_cmd { 
+struct iwl_tas_config_cmd_v2 { __le32 block_list_size; - __le32 block_list_array[IWL_TAS_BLACK_LIST_MAX]; + __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; } __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */ /** + * struct iwl_tas_config_cmd_v3 - configures the TAS + * @block_list_size: size of relevant field in block_list_array + * @block_list_array: list of countries where TAS must be disabled + * @override_tas_iec: indicates whether to override default value of IEC regulatory + * @enable_tas_iec: in case override_tas_iec is set - + * indicates whether IEC regulatory is enabled or disabled + */ +struct iwl_tas_config_cmd_v3 { + __le32 block_list_size; + __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; + __le16 override_tas_iec; + __le16 enable_tas_iec; +} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */ + +/** * enum iwl_lari_configs - bit masks for the various LARI config operations * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index 68b788b92b7a..e66f77924f83 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -150,11 +150,12 @@ struct iwl_phy_context_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; - /* PHY_CONTEXT_DATA_API_S_VER_3 */ + /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */ struct iwl_fw_channel_info ci; __le32 lmac_id; - __le32 rxchain_info; + __le32 rxchain_info; /* reserved in _VER_4 */ __le32 dsp_cfg_flags; __le32 reserved; -} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */ +} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */ + #endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 4d671c878bb7..81318208f2f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -419,7 +419,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 { * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. - * @table_revision: BIOS table revision. + * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v2 { __le32 ops; @@ -431,7 +431,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 { * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. - * @table_revision: BIOS table revision. + * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v3 { __le32 ops; @@ -443,7 +443,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 { * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. 
- * @table_revision: BIOS table revision. + * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v4 { __le32 ops; @@ -455,7 +455,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 { * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. - * @table_revision: BIOS table revision. + * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v5 { __le32 ops; @@ -503,6 +503,20 @@ union iwl_ppag_table_cmd { } v2; } __packed; +#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26 +#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13 + +/** + * struct iwl_sar_offset_mapping_cmd - struct for SAR_OFFSET_MAPPING_TABLE_CMD + * @offset_map: mapping an mcc to a geo sar group + * @reserved: reserved + */ +struct iwl_sar_offset_mapping_cmd { + u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE] + [MCC_TO_SAR_OFFSET_TABLE_COL_SIZE]; + u16 reserved; +} __packed; /* SAR_OFFSET_MAPPING_TABLE_CMD_API_S */ + /** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index a09081d7ed45..173a6991587b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -116,9 +116,20 @@ enum IWL_TLC_MNG_NSS { IWL_TLC_NSS_MAX }; -enum IWL_TLC_HT_BW_RATES { - IWL_TLC_HT_BW_NONE_160, - IWL_TLC_HT_BW_160, +/** + * enum IWL_TLC_MCS_PER_BW - mcs index per BW + * @IWL_TLC_MCS_PER_BW_80: mcs for bw - 20 MHz, 40 MHz, 80 MHz + * @IWL_TLC_MCS_PER_BW_160: mcs for bw - 160 MHz + * @IWL_TLC_MCS_PER_BW_320: mcs for bw - 320 MHz + * @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3 + * @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4 + */ +enum IWL_TLC_MCS_PER_BW { + IWL_TLC_MCS_PER_BW_80, + IWL_TLC_MCS_PER_BW_160, + IWL_TLC_MCS_PER_BW_320, + IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1, + IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1, }; /** @@ -131,8 +142,8 @@ enum IWL_TLC_HT_BW_RATES { * @amsdu: TX amsdu is supported * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags * @non_ht_rates: bitmap of supported legacy rates - * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width> - * pair (0 - 80mhz width and below, 1 - 160mhz). + * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW + * <nss, channel-width> pair (0 - 80 MHz width and below, 1 - 160 MHz). * @max_mpdu_len: max MPDU length, in bytes * @sgi_ch_width_supp: bitmap of SGI support per channel width * use BIT(@enum iwl_tlc_mng_cfg_cw) * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), * set zero for no limit. 
*/ -struct iwl_tlc_config_cmd { +struct iwl_tlc_config_cmd_v3 { u8 sta_id; u8 reserved1[3]; u8 max_ch_width; @@ -149,7 +160,7 @@ struct iwl_tlc_config_cmd { u8 amsdu; __le16 flags; __le16 non_ht_rates; - __le16 ht_rates[IWL_TLC_NSS_MAX][2]; + __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3]; __le16 max_mpdu_len; u8 sgi_ch_width_supp; u8 reserved2; @@ -157,6 +168,37 @@ struct iwl_tlc_config_cmd { } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */ /** + * struct iwl_tlc_config_cmd_v4 - TLC configuration + * @sta_id: station id + * @reserved1: reserved + * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw + * @mode: &enum iwl_tlc_mng_cfg_mode + * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains + * @sgi_ch_width_supp: bitmap of SGI support per channel width + * use BIT(&enum iwl_tlc_mng_cfg_cw) + * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags + * @non_ht_rates: bitmap of supported legacy rates + * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width> + * pair (0 - 80 MHz width and below, 1 - 160 MHz, 2 - 320 MHz). + * @max_mpdu_len: max MPDU length, in bytes + * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), + * set zero for no limit. + */ +struct iwl_tlc_config_cmd_v4 { + u8 sta_id; + u8 reserved1[3]; + u8 max_ch_width; + u8 mode; + u8 chains; + u8 sgi_ch_width_supp; + __le16 flags; + __le16 non_ht_rates; + __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4]; + __le16 max_mpdu_len; + __le16 max_tx_op; +} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */ + +/** * enum iwl_tlc_update_flags - updated fields * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update * @IWL_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 8b200379f7c2..5413087ae909 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -82,6 +82,16 @@ enum iwl_scan_offload_band_selection { IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, }; +enum iwl_scan_offload_auth_alg { + IWL_AUTH_ALGO_UNSUPPORTED = 0x00, + IWL_AUTH_ALGO_NONE = 0x01, + IWL_AUTH_ALGO_PSK = 0x02, + IWL_AUTH_ALGO_8021X = 0x04, + IWL_AUTH_ALGO_SAE = 0x08, + IWL_AUTH_ALGO_8021X_SHA384 = 0x10, + IWL_AUTH_ALGO_OWE = 0x20, +}; + /** * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S * @ssid_index: index to ssid list in fixed part @@ -201,7 +211,7 @@ struct iwl_scan_channel_cfg_lmac { __le32 iter_interval; } __packed; -/* +/** * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 * @offset: offset in the data block * @len: length of the segment */ struct iwl_scan_probe_segment { __le16 offset; __le16 len; } __packed; -/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2 +/** + * struct iwl_scan_probe_req_v1 - PROBE_REQUEST_FRAME_API_S_VER_2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe @@ -224,7 +235,8 @@ struct iwl_scan_probe_req_v1 { u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; } __packed; -/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2 +/** + * struct iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe @@ -247,7 +259,8 @@ enum iwl_scan_channel_flags { IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6), }; -/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S +/** + * struct iwl_scan_channel_opt - 
CHANNEL_OPTIMIZATION_API_S * @flags: enum iwl_scan_channel_flags * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is * involved. @@ -492,7 +505,7 @@ struct iwl_scan_dwell { } __packed; /** - * struct iwl_scan_config_v1 + * struct iwl_scan_config_v1 - scan configuration command * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions @@ -524,6 +537,21 @@ struct iwl_scan_config_v1 { #define SCAN_LB_LMAC_IDX 0 #define SCAN_HB_LMAC_IDX 1 +/** + * struct iwl_scan_config_v2 - scan configuration command + * @flags: enum scan_config_flags + * @tx_chains: valid_tx antenna - ANT_* definitions + * @rx_chains: valid_rx antenna - ANT_* definitions + * @legacy_rates: default legacy rates - enum scan_config_rates + * @out_of_channel_time: default max out of serving channel time + * @suspend_time: default max suspend time + * @dwell: dwells for the scan + * @mac_addr: default mac address to be used in probes + * @bcast_sta_id: the index of the station in the fw + * @channel_flags: default channel flags - enum iwl_channel_flags + * scan_config_channel_flag + * @channel_array: default supported channels + */ struct iwl_scan_config_v2 { __le32 flags; __le32 tx_chains; @@ -539,7 +567,7 @@ struct iwl_scan_config_v2 { } __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */ /** - * struct iwl_scan_config + * struct iwl_scan_config - scan configuration command * @enable_cam_mode: whether to enable CAM mode. * @enable_promiscouos_mode: whether to enable promiscuous mode * @bcast_sta_id: the index of the station in the fw. Deprecated starting with @@ -640,6 +668,10 @@ enum iwl_umac_scan_general_flags2 { * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case * &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is * activated over 6GHz PSC channels, filter in beacons and probe responses. + * @IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE: if set, send probe requests at a minimum + * rate of 5.5 Mbps, filter in broadcast probe responses and set the max + * channel time indication field in the FILS request parameters element + * (if included by the driver in the probe request IEs). */ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0), @@ -657,6 +689,20 @@ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12), IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13), IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14), + IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = BIT(15), +}; + +/** + * enum iwl_umac_scan_general_params_flags2 - UMAC scan general flags2 + * + * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB: scan event scheduling + * should be aware of a P2P GO operation on the 2GHz band. + * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB: scan event scheduling + * should be aware of a P2P GO operation on the 5GHz or 6GHz band. 
+ */ +enum iwl_umac_scan_general_params_flags2 { + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = BIT(0), + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = BIT(1), }; /** @@ -941,8 +987,8 @@ struct iwl_scan_channel_params_v6 { } __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */ /** - * struct iwl_scan_general_params_v10 - * @flags: &enum iwl_umac_scan_flags + * struct iwl_scan_general_params_v11 + * @flags: &enum iwl_umac_scan_general_flags_v2 * @reserved: reserved for future * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @active_dwell: dwell time for active scan per LMAC @@ -952,7 +998,8 @@ struct iwl_scan_channel_params_v6 { * for 5GHz channels * @adwell_default_social_chn: adaptive dwell default number of * APs per social channel - * @reserved1: reserved for future + * @flags2: for version 11 see &enum iwl_umac_scan_general_params_flags2. + * Otherwise reserved. * @adwell_max_budget: the maximal number of TUs that adaptive dwell * can add to the total scan time * @max_out_of_time: max out of serving channel time, per LMAC @@ -963,7 +1010,7 @@ struct iwl_scan_channel_params_v6 { * @num_of_fragments: number of fragments needed for full fragmented * scan coverage. */ -struct iwl_scan_general_params_v10 { +struct iwl_scan_general_params_v11 { __le16 flags; u8 reserved; u8 scan_start_mac_id; @@ -971,14 +1018,14 @@ struct iwl_scan_general_params_v10 { u8 adwell_default_2g; u8 adwell_default_5g; u8 adwell_default_social_chn; - u8 reserved1; + u8 flags2; __le16 adwell_max_budget; __le32 max_out_of_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; -} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */ +} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_11 and *_VER_10 */ /** * struct iwl_scan_periodic_parms_v1 @@ -994,31 +1041,31 @@ struct iwl_scan_periodic_parms_v1 { /** * struct iwl_scan_req_params_v12 - * @general_params: &struct iwl_scan_general_params_v10 + * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v4 * @periodic_params: &struct iwl_scan_periodic_parms_v1 * @probe_params: &struct iwl_scan_probe_params_v3 */ struct iwl_scan_req_params_v12 { - struct iwl_scan_general_params_v10 general_params; + struct iwl_scan_general_params_v11 general_params; struct iwl_scan_channel_params_v4 channel_params; struct iwl_scan_periodic_parms_v1 periodic_params; struct iwl_scan_probe_params_v3 probe_params; } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */ /** - * struct iwl_scan_req_params_v14 - * @general_params: &struct iwl_scan_general_params_v10 + * struct iwl_scan_req_params_v15 + * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v6 * @periodic_params: &struct iwl_scan_periodic_parms_v1 * @probe_params: &struct iwl_scan_probe_params_v4 */ -struct iwl_scan_req_params_v14 { - struct iwl_scan_general_params_v10 general_params; +struct iwl_scan_req_params_v15 { + struct iwl_scan_general_params_v11 general_params; struct iwl_scan_channel_params_v6 channel_params; struct iwl_scan_periodic_parms_v1 periodic_params; struct iwl_scan_probe_params_v4 probe_params; -} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */ +} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_15 and *_VER_14 */ /** * struct iwl_scan_req_umac_v12 @@ -1033,16 +1080,16 @@ struct iwl_scan_req_umac_v12 { } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */ /** - * struct iwl_scan_req_umac_v14 + * 
struct iwl_scan_req_umac_v15 * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters */ -struct iwl_scan_req_umac_v14 { +struct iwl_scan_req_umac_v15 { __le32 uid; __le32 ooc_priority; - struct iwl_scan_req_params_v14 scan_params; -} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */ + struct iwl_scan_req_params_v15 scan_params; +} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_15 and *_VER_14 */ /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 18cca15caa3a..898e62326e6c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020 - 2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -432,6 +432,7 @@ enum iwl_fw_statistics_type { FW_STATISTICS_HE, }; /* FW_STATISTICS_TYPE_API_E_VER_1 */ +#define IWL_STATISTICS_TYPE_MSK 0x7f /** * struct iwl_statistics_ntfy_hdr * @@ -446,10 +447,97 @@ struct iwl_statistics_ntfy_hdr { }; /* STATISTICS_NTFY_HDR_API_S_VER_1 */ /** + * struct iwl_statistics_ntfy_per_mac + * + * @beacon_filter_average_energy: Average energy [-dBm] of the 2 + * antennas. + * @air_time: air time + * @beacon_counter: all beacons (both filtered and not filtered) + * @beacon_average_energy: all beacons (both filtered and not + * filtered) + * @beacon_rssi_a: beacon RSSI on antenna A + * @beacon_rssi_b: beacon RSSI on antenna B + * @rx_bytes: RX byte count + */ +struct iwl_statistics_ntfy_per_mac { + __le32 beacon_filter_average_energy; + __le32 air_time; + __le32 beacon_counter; + __le32 beacon_average_energy; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 rx_bytes; +} __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */ + +#define IWL_STATS_MAX_BW_INDEX 5 +/** + * struct iwl_statistics_ntfy_per_phy + * @channel_load: channel load + * @channel_load_by_us: device contribution to MCLM + * @channel_load_not_by_us: other devices' contribution to MCLM + * @clt: CLT HW timer (TIM_CH_LOAD2) + * @act: active accumulator SW + * @elp: elapsed time accumulator SW + * @rx_detected_per_ch_width: number of deferred TX per channel width, + * 0 - 20, 1/2/3 - 40/80/160 + * @success_per_ch_width: number of frames that got ACK/BACK/CTS + * per channel BW. note, BACK counted as 1 + * @fail_per_ch_width: number of frames that didn't get ACK/BACK/CTS + * per channel BW. note BACK counted as 1 + * @last_tx_ch_width_indx: last txed frame channel width index + */ +struct iwl_statistics_ntfy_per_phy { + __le32 channel_load; + __le32 channel_load_by_us; + __le32 channel_load_not_by_us; + __le32 clt; + __le32 act; + __le32 elp; + __le32 rx_detected_per_ch_width[IWL_STATS_MAX_BW_INDEX]; + __le32 success_per_ch_width[IWL_STATS_MAX_BW_INDEX]; + __le32 fail_per_ch_width[IWL_STATS_MAX_BW_INDEX]; + __le32 last_tx_ch_width_indx; +} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */ + +/** + * struct iwl_statistics_ntfy_per_sta + * + * @average_energy: average energy; in fact it is the negated energy value.
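+ *	(so a raw value of, for example, 65 would stand for -65 dBm; the
+ *	dBm unit is an assumption here, carried over from the per-MAC
+ *	energy fields above)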
+ */ +struct iwl_statistics_ntfy_per_sta { + __le32 average_energy; +} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */ + +#define IWL_STATS_MAX_PHY_OPERTINAL 3 +/** * struct iwl_statistics_operational_ntfy * * @hdr: general statistics header * @flags: bitmap of possible notification structures + * @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac + * @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy + * @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta + * @rx_time: rx time + * @tx_time: usec the radio is transmitting. + * @on_time_rf: The total time in usec the RF is awake. + * @on_time_scan: usec the radio is awake due to scan. + */ +struct iwl_statistics_operational_ntfy { + struct iwl_statistics_ntfy_hdr hdr; + __le32 flags; + struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX]; + struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL]; + struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX]; + __le64 rx_time; + __le64 tx_time; + __le64 on_time_rf; + __le64 on_time_scan; +} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_15 */ + +/** + * struct iwl_statistics_operational_ntfy_ver_14 + * + * @hdr: general statistics header + * @flags: bitmap of possible notification structures * @mac_id: mac on which the beacon was received * @beacon_filter_average_energy: Average energy [-dBm] of the 2 * antennas. @@ -469,7 +557,7 @@ struct iwl_statistics_ntfy_hdr { * @average_energy: in fact it is minus the energy.. * @reserved: reserved */ -struct iwl_statistics_operational_ntfy { +struct iwl_statistics_operational_ntfy_ver_14 { struct iwl_statistics_ntfy_hdr hdr; __le32 flags; __le32 mac_id; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/system.h index c5df1171462b..acf5d4b9a214 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/system.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2012-2014, 2019-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ -#ifndef __iwl_fw_api_soc_h__ -#define __iwl_fw_api_soc_h__ +#ifndef __iwl_fw_api_system_h__ +#define __iwl_fw_api_system_h__ #define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0) #define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1) @@ -32,4 +32,12 @@ struct iwl_soc_configuration_cmd { * SOC_CONFIGURATION_CMD_S_VER_2 */ -#endif /* __iwl_fw_api_soc_h__ */ +/** + * struct iwl_system_features_control_cmd - system features control command + * @features: bitmap of features to disable + */ +struct iwl_system_features_control_cmd { + __le32 features[4]; +} __packed; /* SYSTEM_FEATURES_CONTROL_CMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_system_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 4a74c0ea0f31..e73cc7380a26 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -177,6 +177,17 @@ enum iwl_tx_offload_assist_flags_pos { #define IWL_TX_CMD_OFFLD_MH_MASK 0x1f #define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f +enum iwl_tx_offload_assist_bz { + IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff, + IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800, + IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000, + IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000, + 
IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000, + IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000, + IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000, + IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000, +}; + /* TODO: complete documentation for try_cnt and btkill_cnt */ /** * struct iwl_tx_cmd - TX command struct to FW diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index a39013c401c9..7ad9cee925da 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -880,7 +880,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, dump_info->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump_info->hw_step = - cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + cpu_to_le32(fwrt->trans->hw_rev_step); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); strncpy(dump_info->dev_human_readable, fwrt->trans->name, @@ -1165,8 +1165,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->dev_addr.size)); - if ((le32_to_cpu(reg->id) & IWL_FW_INI_REGION_V2_MASK) == - IWL_FW_INI_HW_SMEM_REGION_ID && + if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM && fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, range->data, @@ -1565,7 +1564,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG, DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK); for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) { - prph_data = iwl_read_prph(fwrt->trans, (i % 2) ? + prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ? DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); if (prph_data == 0x5a5a5a5a) { @@ -1988,17 +1987,18 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_dump_entry *entry; - struct iwl_fw_error_dump_data *tlv; + struct iwl_fw_ini_error_dump_data *tlv; struct iwl_fw_ini_error_dump_header *header; - u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id); + u32 type = reg->type; + u32 id = le32_to_cpu(reg->id); u32 num_of_ranges, i, size; void *range; /* - * The higher part of the ID in version 2 is irrelevant for + * The higher part of the ID from version 2 is irrelevant for * us, so mask it out.
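+	 * (e.g., assuming IWL_FW_INI_REGION_V2_MASK keeps only the lower
+	 * 16 bits, a version >= 2 region ID of 0x00030001 would be recorded
+	 * as plain ID 1 below; the mask width is an assumption, the mask is
+	 * not defined in this hunk)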
*/ - if (le32_to_cpu(reg->hdr.version) == 2) + if (le32_to_cpu(reg->hdr.version) >= 2) id &= IWL_FW_INI_REGION_V2_MASK; if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr || @@ -2017,6 +2017,9 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, tlv = (void *)entry->data; tlv->type = reg->type; + tlv->sub_type = reg->sub_type; + tlv->sub_type_ver = reg->sub_type_ver; + tlv->reserved = reg->reserved; tlv->len = cpu_to_le32(size); IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id, @@ -2099,7 +2102,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type); dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); - dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); /* * Several HWs all have type == 0x42, so we'll override this value @@ -2107,7 +2110,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, */ hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev); if (hw_type == IWL_AX210_HW_TYPE) { - u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR_GEN2); + u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR); u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT); u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT); u32 masked_bits = is_jacket | (is_cdb << 1); @@ -2291,7 +2294,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, } reg = (void *)reg_data.reg_tlv->data; - reg_type = le32_to_cpu(reg->type); + reg_type = reg->type; if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; @@ -2716,6 +2719,9 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); + if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) + iwl_force_nmi(fwrt->trans); + out: if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_error_dump_data_free(dump_data); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index 016b3a4c5f51..b90f1e9ce691 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -12,6 +12,7 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" +#include "pnvm.h" /* * Note: This structure is read from the device with IO accesses, @@ -19,53 +20,6 @@ * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! 
*/ -struct iwl_error_event_table_v1 { - u32 valid; /* (nonzero) valid, (0) log is empty */ - u32 error_id; /* type of error */ - u32 pc; /* program counter */ - u32 blink1; /* branch link */ - u32 blink2; /* branch link */ - u32 ilink1; /* interrupt link */ - u32 ilink2; /* interrupt link */ - u32 data1; /* error-specific data */ - u32 data2; /* error-specific data */ - u32 data3; /* error-specific data */ - u32 bcon_time; /* beacon timer */ - u32 tsf_low; /* network timestamp function timer */ - u32 tsf_hi; /* network timestamp function timer */ - u32 gp1; /* GP1 timer register */ - u32 gp2; /* GP2 timer register */ - u32 gp3; /* GP3 timer register */ - u32 ucode_ver; /* uCode version */ - u32 hw_ver; /* HW Silicon version */ - u32 brd_ver; /* HW board version */ - u32 log_pc; /* log program counter */ - u32 frame_ptr; /* frame pointer */ - u32 stack_ptr; /* stack pointer */ - u32 hcmd; /* last host command header */ - u32 isr0; /* isr status register LMPM_NIC_ISR0: - * rxtx_flag */ - u32 isr1; /* isr status register LMPM_NIC_ISR1: - * host_flag */ - u32 isr2; /* isr status register LMPM_NIC_ISR2: - * enc_flag */ - u32 isr3; /* isr status register LMPM_NIC_ISR3: - * time_flag */ - u32 isr4; /* isr status register LMPM_NIC_ISR4: - * wico interrupt */ - u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ - u32 wait_event; /* wait event() caller address */ - u32 l2p_control; /* L2pControlField */ - u32 l2p_duration; /* L2pDurationField */ - u32 l2p_mhvalid; /* L2pMhValidBits */ - u32 l2p_addr_match; /* L2pAddrMatchStat */ - u32 lmpm_pmg_sel; /* indicate which clocks are turned on - * (LMPM_PMG_SEL) */ - u32 u_timestamp; /* indicate when the date and time of the - * compilation */ - u32 flow_handler; /* FH read/write pointers, RX credit */ -} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */; - struct iwl_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ @@ -147,6 +101,7 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt) struct iwl_trans *trans = fwrt->trans; struct iwl_umac_error_event_table table = {}; u32 base = fwrt->trans->dbg.umac_error_event_table; + char pnvm_name[MAX_PNVM_NAME]; if (!base && !(fwrt->trans->dbg.error_event_table_tlv_status & @@ -164,6 +119,13 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt) fwrt->trans->status, table.valid); } + if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) == + FW_SYSASSERT_PNVM_MISSING) { + iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name)); + IWL_ERR(fwrt, "PNVM data is missing, please install %s\n", + pnvm_name); + } + IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id, iwl_fw_lookup_assert_desc(table.error_id)); IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1); @@ -212,7 +174,9 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu IWL_ERR(trans, "HW error, resetting before reading\n"); /* reset the device */ - iwl_trans_sw_reset(trans); + err = iwl_trans_sw_reset(trans, true); + if (err) + return; err = iwl_finish_nic_init(trans); if (err) @@ -295,21 +259,21 @@ struct iwl_tcm_error_event_table { u32 reserved[4]; } __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */ -static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt) +static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx) { struct iwl_trans *trans = fwrt->trans; struct iwl_tcm_error_event_table table = {}; - u32 base = fwrt->trans->dbg.tcm_error_event_table; + u32 base = fwrt->trans->dbg.tcm_error_event_table[idx]; int 
i; + u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 : + IWL_ERROR_EVENT_TABLE_TCM1; - if (!base || - !(fwrt->trans->dbg.error_event_table_tlv_status & - IWL_ERROR_EVENT_TABLE_TCM)) + if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag)) return; iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); - IWL_ERR(fwrt, "TCM status:\n"); + IWL_ERR(fwrt, "TCM%d status:\n", idx + 1); IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2); IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1); @@ -328,13 +292,72 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt) for (i = 0; i < ARRAY_SIZE(table.sw_status); i++) IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n", table.sw_status[i], i); +} - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { - u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH); +/* + * RCM error struct. + * Note: This structure is read from the device with IO accesses, + * and the reading already does the endian conversion. As it is + * read with u32-sized accesses, any members with a different size + * need to be ordered correctly though! + */ +struct iwl_rcm_error_event_table { + u32 valid; + u32 error_id; + u32 blink2; + u32 ilink1; + u32 ilink2; + u32 data1, data2, data3; + u32 logpc; + u32 frame_pointer; + u32 stack_pointer; + u32 msgid; + u32 isr; + u32 frame_hw_status; + u32 mbx_lmac_to_rcm_req; + u32 mbx_rcm_to_lmac_req; + u32 mh_ctl; + u32 mh_addr1_lo; + u32 mh_info; + u32 mh_err; + u32 reserved[3]; +} __packed; /* RCM_LOG_ERROR_TABLE_API_S_VER_1 */ + +static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx) +{ + struct iwl_trans *trans = fwrt->trans; + struct iwl_rcm_error_event_table table = {}; + u32 base = fwrt->trans->dbg.rcm_error_event_table[idx]; + u32 flag = idx ? 
IWL_ERROR_EVENT_TABLE_RCM2 : + IWL_ERROR_EVENT_TABLE_RCM1; - IWL_ERR(fwrt, "Function Scratch status:\n"); - IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch); - } + if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag)) + return; + + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + IWL_ERR(fwrt, "RCM%d status:\n", idx + 1); + IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); + IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2); + IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1); + IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2); + IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1); + IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2); + IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3); + IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc); + IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer); + IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer); + IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid); + IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr); + IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status); + IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n", + table.mbx_lmac_to_rcm_req); + IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n", + table.mbx_rcm_to_lmac_req); + IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl); + IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo); + IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info); + IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err); } static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt) @@ -418,8 +441,18 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt) if (fwrt->trans->dbg.lmac_error_event_table[1]) iwl_fwrt_dump_lmac_error_log(fwrt, 1); iwl_fwrt_dump_umac_error_log(fwrt); - iwl_fwrt_dump_tcm_error_log(fwrt); + iwl_fwrt_dump_tcm_error_log(fwrt, 0); + iwl_fwrt_dump_rcm_error_log(fwrt, 0); + iwl_fwrt_dump_tcm_error_log(fwrt, 1); + iwl_fwrt_dump_rcm_error_log(fwrt, 1); iwl_fwrt_dump_iml_error_log(fwrt); iwl_fwrt_dump_fseq_regs(fwrt); + + if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH); + + IWL_ERR(fwrt, "Function Scratch status:\n"); + IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch); + } } IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 9036b32ec765..079fa0023bd8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -232,6 +232,24 @@ struct iwl_fw_error_dump_mem { #define IWL_INI_DUMP_INFO_TYPE BIT(31) /** + * struct iwl_fw_ini_error_dump_data - data for one type + * @type: &enum iwl_fw_ini_region_type + * @sub_type: sub type id + * @sub_type_ver: sub type version + * @reserved: not in use + * @len: the length starting from %data + * @data: the data itself + */ +struct iwl_fw_ini_error_dump_data { + u8 type; + u8 sub_type; + u8 sub_type_ver; + u8 reserved; + __le32 len; + __u8 data[]; +} __packed; + +/** * struct iwl_fw_ini_dump_entry * @list: list of dump entries * @size: size of the data diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 3d572f5024bb..e4ebda64cd52 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -98,7 +98,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_PNVM_VERSION = 62, IWL_UCODE_TLV_PNVM_SKU = 64, - IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65, IWL_UCODE_TLV_SEC_TABLE_ADDR = 66, IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c index 24a966673691..530674a35eeb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2019 - 2020 Intel Corporation + * Copyright(c) 2019 - 2021 Intel Corporation */ #include "img.h" @@ -49,10 +49,9 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def) } EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver); -#define FW_SYSASSERT_CPU_MASK 0xf0000000 static const struct { const char *name; - u8 num; + u32 num; } advanced_lookup[] = { { "NMI_INTERRUPT_WDG", 0x34 }, { "SYSASSERT", 0x35 }, @@ -73,6 +72,7 @@ static const struct { { "NMI_INTERRUPT_ACTION_PT", 0x7C }, { "NMI_INTERRUPT_UNKNOWN", 0x84 }, { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING }, { "ADVANCED_SYSASSERT", 0 }, }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 993bda17fa30..fa7b1780064c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -279,4 +279,8 @@ u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def); u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def); const char *iwl_fw_lookup_assert_desc(u32 num); + +#define FW_SYSASSERT_CPU_MASK 0xf0000000 +#define FW_SYSASSERT_PNVM_MISSING 0x0010070d + #endif /* __iwl_fw_img_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 566957ac4539..139ece879fab 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -8,7 +8,7 @@ #include "dbg.h" #include "debugfs.h" -#include "fw/api/soc.h" +#include "fw/api/system.h" #include "fw/api/commands.h" #include "fw/api/rx.h" #include "fw/api/datapath.h" diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 69799f1ed2c4..3cb0ddbe3ab2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -156,8 +156,13 @@ struct iwl_fw_runtime { u8 sar_chain_b_profile; struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3]; u32 geo_rev; - union iwl_ppag_table_cmd ppag_table; + u32 geo_num_profiles; + bool geo_enabled; + struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; + u32 ppag_flags; u32 ppag_ver; + struct iwl_sar_offset_mapping_cmd sgom_table; + bool sgom_enabled; #endif }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index c875bf35533c..bd82c24811c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -11,6 +11,7 @@ #include "fw/uefi.h" #include "fw/api/alive.h" #include <linux/efi.h> +#include "fw/runtime.h" #define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \ 0xb2, 0xec, 0xf5, 0xa3, \ @@ -86,6 +87,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-EINVAL); goto out; } @@ -105,6 +107,7 @@ 
static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, IWL_DEBUG_FW(trans, "Couldn't allocate (more) reduce_power_data\n"); + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-ENOMEM); goto out; } @@ -134,6 +137,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, done: if (!size) { IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n"); + /* Better safe than sorry, but 'reduce_power_data' should + * always be NULL if !size. + */ + kfree(reduce_power_data); reduce_power_data = ERR_PTR(-ENOENT); goto out; } @@ -260,3 +267,90 @@ out: return data; } + +#ifdef CONFIG_ACPI +static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data, + struct iwl_fw_runtime *fwrt) +{ + int i, j; + + if (sgom_data->revision != 1) + return -EINVAL; + + memcpy(fwrt->sgom_table.offset_map, sgom_data->offset_map, + sizeof(fwrt->sgom_table.offset_map)); + + for (i = 0; i < MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE; i++) { + for (j = 0; j < MCC_TO_SAR_OFFSET_TABLE_COL_SIZE; j++) { + /* since each byte is composed of two values, */ + /* one for each letter, */ + /* extract and check each of them separately */ + u8 value = fwrt->sgom_table.offset_map[i][j]; + u8 low = value & 0xF; + u8 high = (value & 0xF0) >> 4; + + if (high > fwrt->geo_num_profiles) + high = 0; + if (low > fwrt->geo_num_profiles) + low = 0; + fwrt->sgom_table.offset_map[i][j] = (high << 4) | low; + } + } + + fwrt->sgom_enabled = true; + return 0; +} + +void iwl_uefi_get_sgom_table(struct iwl_trans *trans, + struct iwl_fw_runtime *fwrt) +{ + struct efivar_entry *sgom_efivar; + struct uefi_cnv_wlan_sgom_data *data; + unsigned long package_size; + int err, ret; + + if (!fwrt->geo_enabled) + return; + + sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL); + if (!sgom_efivar) + return; + + memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME, + sizeof(IWL_UEFI_SGOM_NAME)); + sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; + + /* TODO: we hardcode a maximum length here, because reading + * from the UEFI is not working. To implement this properly, + * we have to call efivar_entry_size(). + */ + package_size = IWL_HARDCODED_SGOM_SIZE; + + data = kmalloc(package_size, GFP_KERNEL); + if (!data) { + data = ERR_PTR(-ENOMEM); + goto out; + } + + err = efivar_entry_get(sgom_efivar, NULL, &package_size, data); + if (err) { + IWL_DEBUG_FW(trans, + "SGOM UEFI variable not found %d\n", err); + goto out_free; + } + + IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n", + package_size); + + ret = iwl_uefi_sgom_parse(data, fwrt); + if (ret < 0) + IWL_DEBUG_FW(trans, "Cannot read SGOM tables.
rev is invalid\n"); + +out_free: + kfree(data); + +out: + kfree(sgom_efivar); +} +IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table); +#endif /* CONFIG_ACPI */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index d552c656ac9f..09d2a971b3a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -7,6 +7,7 @@ #define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm" #define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower" +#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping" /* * TODO: we have these hardcoded values that the caller must pass, @@ -16,6 +17,7 @@ */ #define IWL_HARDCODED_PNVM_SIZE 4096 #define IWL_HARDCODED_REDUCE_POWER_SIZE 32768 +#define IWL_HARDCODED_SGOM_SIZE 339 struct pnvm_sku_package { u8 rev; @@ -25,6 +27,16 @@ struct pnvm_sku_package { u8 data[]; } __packed; +struct uefi_cnv_wlan_sgom_data { + u8 revision; + u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1]; +} __packed; + +/* + * This is known to be broken on v4.19 and to work on v5.4. Until we + * figure out why this is the case and how to make it work, simply + * disable the feature in old kernels. + */ #ifdef CONFIG_EFI void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len); void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len); @@ -42,4 +54,12 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) } #endif /* CONFIG_EFI */ +#if defined(CONFIG_EFI) && defined(CONFIG_ACPI) +void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); +#else +static inline +void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) +{ +} +#endif #endif /* __iwl_fw_uefi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 665167a223f6..e122b8b4e1fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -84,6 +84,10 @@ enum iwl_nvm_type { #define IWL_DEFAULT_MAX_TX_POWER 22 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ NETIF_F_TSO | NETIF_F_TSO6) +#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6) +#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \ + IWL_TX_CSUM_NETIF_FLAGS_BZ | \ + NETIF_F_RXCSUM) /* Antenna presence definitions */ #define ANT_NONE 0x0 @@ -448,6 +452,9 @@ struct iwl_cfg { #define IWL_CFG_NO_CDB 0x0 #define IWL_CFG_CDB 0x1 +#define IWL_CFG_NO_JACKET 0x0 +#define IWL_CFG_IS_JACKET 0x1 + #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) #define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) @@ -462,6 +469,7 @@ struct iwl_dev_info { u8 no_160; u8 cores; u8 cdb; + u8 jacket; const struct iwl_cfg *cfg; const char *name; }; @@ -610,7 +618,6 @@ extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg; extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0; -extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long; extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; @@ -634,6 +641,12 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0; extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0; extern const struct iwl_cfg 
iwl_cfg_bz_a0_fm_a0; extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0; +extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0; +extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0; +extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0; +extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0; +extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0; +extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index ff79a2ecb242..f90d4662c164 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -105,9 +105,14 @@ /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) -/* Doorbell NMI (since Bz) */ +#define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114) +#define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3 +#define CSR_IPC_SLEEP_CONTROL_RESUME 0 + +/* Doorbell - since Bz + * connected to UREG_DOORBELL_TO_ISR6 (lower 16 bits only) + */ #define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130) -#define CSR_DOORBELL_VECTOR_NMI BIT(1) /* host chicken bits */ #define CSR_HOST_CHICKEN (CSR_BASE + 0x204) @@ -143,8 +148,7 @@ #define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101) /* Bits for CSR_HW_IF_CONFIG_REG */ -#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003) -#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C) +#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F) #define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080) #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) @@ -287,8 +291,7 @@ #define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31) /* HW REV */ -#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) -#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2) +#define CSR_HW_REV_STEP_DASH(_val) ((_val) & CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH) #define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4) /* HW RFID */ @@ -306,6 +309,7 @@ enum { SILICON_A_STEP = 0, SILICON_B_STEP, SILICON_C_STEP, + SILICON_Z_STEP = 0xf, }; @@ -328,10 +332,10 @@ enum { #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) -#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) -#define CSR_HW_REV_TYPE_QU_B0 (0x0000334) -#define CSR_HW_REV_TYPE_QU_C0 (0x0000338) -#define CSR_HW_REV_TYPE_QUZ (0x0000354) +#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000361) +#define CSR_HW_REV_TYPE_QU_B0 (0x0000331) +#define CSR_HW_REV_TYPE_QU_C0 (0x0000332) +#define CSR_HW_REV_TYPE_QUZ (0x0000351) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) #define CSR_HW_REV_TYPE_SO (0x0000370) #define CSR_HW_REV_TYPE_TY (0x0000420) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 7ab98b419cc1..c73672d61356 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -59,7 +59,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = { [IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,}, - [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,}, + [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,}, [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,}, }; @@ -177,14 +177,14 @@ static int 
iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data; struct iwl_ucode_tlv **active_reg; u32 id = le32_to_cpu(reg->id); - u32 type = le32_to_cpu(reg->type); + u8 type = reg->type; u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length); /* - * The higher part of the ID in version 2 is irrelevant for + * The higher part of the ID from version 2 is irrelevant for * us, so mask it out. */ - if (le32_to_cpu(reg->hdr.version) == 2) + if (le32_to_cpu(reg->hdr.version) >= 2) id &= IWL_FW_INI_REGION_V2_MASK; if (le32_to_cpu(tlv->length) < sizeof(*reg)) @@ -233,6 +233,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data; struct iwl_fw_ini_trigger_tlv *dup_trig; u32 tp = le32_to_cpu(trig->time_point); + u32 rf = le32_to_cpu(trig->reset_fw); struct iwl_ucode_tlv *dup = NULL; int ret; @@ -247,6 +248,10 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, return -EINVAL; } + IWL_DEBUG_FW(trans, + "WRT: time point %u for trigger TLV with reset_fw %u\n", + tp, rf); + trans->dbg.last_tp_resetfw = 0xFF; if (!le32_to_cpu(trig->occurrences)) { dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length), GFP_KERNEL); @@ -300,14 +305,21 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, bool ext) { - const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0]; - u32 type = le32_to_cpu(tlv->type); - u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; - u32 domain = le32_to_cpu(hdr->domain); enum iwl_ini_cfg_state *cfg_state = ext ? &trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg; + const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0]; + u32 type; + u32 tlv_idx; + u32 domain; int ret; + if (le32_to_cpu(tlv->length) < sizeof(*hdr)) + return; + + type = le32_to_cpu(tlv->type); + tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; + domain = le32_to_cpu(hdr->domain); + if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON && !(domain & trans->dbg.domains_bitmap)) { IWL_DEBUG_FW(trans, @@ -473,7 +485,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) int res; if (!iwlwifi_mod_params.enable_ini || - trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000) + trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) return; res = firmware_request_nowarn(&fw, yoyo_bin, dev); @@ -1159,6 +1171,8 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig, data); int ret, i; + u32 tp = le32_to_cpu(dump_data.trig->time_point); + if (!num_data) { ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); @@ -1177,8 +1191,42 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, break; } } - } + fwrt->trans->dbg.restart_required = false; + IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n", + tp, dump_data.trig->reset_fw); + IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n", + fwrt->trans->dbg.restart_required, + fwrt->trans->dbg.last_tp_resetfw); + + if (fwrt->trans->trans_cfg->device_family == + IWL_DEVICE_FAMILY_9000) { + fwrt->trans->dbg.restart_required = true; + } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT && + fwrt->trans->dbg.last_tp_resetfw == + IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { + fwrt->trans->dbg.restart_required = false; + fwrt->trans->dbg.last_tp_resetfw = 0xFF; + IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n"); }
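+	/*
+	 * The chain below maps the trigger's reset_fw mode onto
+	 * dbg.restart_required: STOP_AND_RELOAD_FW restarts the firmware,
+	 * STOP_FW_ONLY stops it without reload (the NMI is then issued
+	 * from iwl_fw_dbg_collect_sync(), see the dbg.c hunk above), and
+	 * NOTHING leaves the firmware running.
+	 */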
else if (le32_to_cpu(dump_data.trig->reset_fw) == + IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { + IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n"); + fwrt->trans->dbg.restart_required = true; + } else if (le32_to_cpu(dump_data.trig->reset_fw) == + IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { + IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n"); + fwrt->trans->dbg.restart_required = false; + fwrt->trans->dbg.last_tp_resetfw = + le32_to_cpu(dump_data.trig->reset_fw); + } else if (le32_to_cpu(dump_data.trig->reset_fw) == + IWL_FW_INI_RESET_FW_MODE_NOTHING) { + IWL_DEBUG_INFO(fwrt, + "WRT: nothing needs to be done after debug collection\n"); + } else { + IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", + le32_to_cpu(dump_data.trig->reset_fw)); + } + } return 0; } @@ -1244,7 +1292,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) } reg = (void *)(*active_reg)->data; - reg_type = le32_to_cpu(reg->type); + reg_type = reg->type; if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER || !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc)) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 36196e07b1a0..83e3b731ad29 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -130,6 +130,9 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); + + /* clear the data for the aborted load case */ + memset(&drv->fw, 0, sizeof(drv->fw)); } static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, @@ -163,8 +166,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) char tag[8]; if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && - (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP && - CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) { + (drv->trans->hw_rev_step != SILICON_B_STEP && + drv->trans->hw_rev_step != SILICON_C_STEP)) { IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n", drv->trans->hw_rev); @@ -586,6 +589,66 @@ static void iwl_drv_set_dump_exclude(struct iwl_drv *drv, excl->size = le32_to_cpu(fw->size); } +static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv, + const struct iwl_ucode_tlv *tlv) +{ + const struct iwl_fw_ini_region_tlv *region; + u32 length = le32_to_cpu(tlv->length); + u32 addr; + + if (length < offsetof(typeof(*region), special_mem) + + sizeof(region->special_mem)) + return; + + region = (void *)tlv->data; + addr = le32_to_cpu(region->special_mem.base_addr); + addr += le32_to_cpu(region->special_mem.offset); + addr &= ~FW_ADDR_CACHE_CONTROL; + + if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY) + return; + + switch (region->sub_type) { + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE: + drv->trans->dbg.umac_error_event_table = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_UMAC; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE: + drv->trans->dbg.lmac_error_event_table[0] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_LMAC1; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE: + drv->trans->dbg.lmac_error_event_table[1] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_LMAC2; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE: + drv->trans->dbg.tcm_error_event_table[0] = addr; +
drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_TCM1; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE: + drv->trans->dbg.tcm_error_event_table[1] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_TCM2; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE: + drv->trans->dbg.rcm_error_event_table[0] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_RCM1; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE: + drv->trans->dbg.rcm_error_event_table[1] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_RCM2; + break; + default: + break; + } +} + static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, @@ -1153,21 +1216,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, IWL_ERROR_EVENT_TABLE_LMAC1; break; } - case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: { - struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data; - - if (tlv_len != sizeof(*ptr)) - goto invalid_tlv_len; - drv->trans->dbg.tcm_error_event_table = - le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL; - drv->trans->dbg.error_event_table_tlv_status |= - IWL_ERROR_EVENT_TABLE_TCM; - break; - } + case IWL_UCODE_TLV_TYPE_REGIONS: + iwl_parse_dbg_tlv_assert_tables(drv, tlv); + fallthrough; case IWL_UCODE_TLV_TYPE_DEBUG_INFO: case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: case IWL_UCODE_TLV_TYPE_HCMD: - case IWL_UCODE_TLV_TYPE_REGIONS: case IWL_UCODE_TLV_TYPE_TRIGGERS: case IWL_UCODE_TLV_TYPE_CONF_SET: if (iwlwifi_mod_params.enable_ini) @@ -1313,23 +1367,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; + int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; + + for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS - drv->dbgfs_op_mode = debugfs_create_dir(op->name, - drv->dbgfs_drv); - dbgfs_dir = drv->dbgfs_op_mode; + drv->dbgfs_op_mode = debugfs_create_dir(op->name, + drv->dbgfs_drv); + dbgfs_dir = drv->dbgfs_op_mode; #endif - op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); + op_mode = ops->start(drv->trans, drv->trans->cfg, + &drv->fw, dbgfs_dir); + + if (op_mode) + return op_mode; + + IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS - if (!op_mode) { debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; - } #endif + } - return op_mode; + return NULL; } static void _iwl_op_mode_stop(struct iwl_drv *drv) @@ -1367,6 +1429,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) int i; bool load_module = false; bool usniffer_images = false; + bool failure = true; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = @@ -1627,15 +1690,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * else from proceeding if the module fails to load * or hangs loading. 
*/ - if (load_module) { + if (load_module) request_module("%s", op->name); -#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR - if (err) - IWL_ERR(drv, - "failed to load module %s (error %d), is dynamic loading enabled?\n", - op->name, err); -#endif - } + failure = false; goto free; try_again: @@ -1651,6 +1708,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); free: + if (failure) + iwl_dealloc_ucode(drv); + if (pieces) { for (i = 0; i < ARRAY_SIZE(pieces->img); i++) kfree(pieces->img[i].sec); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 2e2d60a58692..0fd009e6d685 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv); #define IWL_EXPORT_SYMBOL(sym) #endif +/* max retry for init flow */ +#define IWL_MAX_INIT_RETRY 2 + #endif /* __iwl_drv_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index f12b86563728..d7a7835b935c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -10,6 +10,7 @@ #include "iwl-modparams.h" #include "iwl-eeprom-parse.h" +#if IS_ENABLED(CONFIG_IWLDVM) /* EEPROM offset definitions */ /* indirect access definitions */ @@ -647,6 +648,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, return n_channels; } +#endif int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, @@ -750,6 +752,7 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans, } } +#if IS_ENABLED(CONFIG_IWLDVM) static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size) @@ -873,3 +876,4 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; } IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index aaa3b65be4e6..e6fd4941a4cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -580,7 +580,7 @@ struct iwl_rb_status { __le16 closed_fr_num; __le16 finished_rb_num; __le16 finished_fr_nam; - __le32 __unused; + __le32 __spare; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 46917b4216b3..253eac4cbf59 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -218,7 +218,7 @@ void iwl_force_nmi(struct iwl_trans *trans) UREG_DOORBELL_TO_ISR6_NMI_BIT); else iwl_write32(trans, CSR_DOORBELL_VECTOR, - CSR_DOORBELL_VECTOR_NMI); + UREG_DOORBELL_TO_ISR6_NMI_BIT); } IWL_EXPORT_SYMBOL(iwl_force_nmi); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index f470f9aea50f..dd58c8f9aa11 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -22,6 +22,7 @@ #include "fw/api/commands.h" #include "fw/api/cmdhdr.h" #include "fw/img.h" +#include "mei/iwl-mei.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -607,7 +608,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { 
.phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, + (IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED << + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS), .phy_cap_info[10] = IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF, }, @@ -664,7 +666,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242, .phy_cap_info[9] = - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED + << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS, }, /* * Set default Tx/Rx HE MCS NSS Support field. @@ -1115,6 +1118,66 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, } struct iwl_nvm_data * +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_mei_nvm *mei_nvm, + const struct iwl_fw *fw) +{ + struct iwl_nvm_data *data; + u32 sbands_flags = 0; + u8 rx_chains = fw->valid_rx_ant; + u8 tx_chains = fw->valid_tx_ant; + + if (cfg->uhb_supported) + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS_UHB), + GFP_KERNEL); + else + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS_EXT), + GFP_KERNEL); + if (!data) + return NULL; + + BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) != + IWL_NVM_NUM_CHANNELS_UHB); + data->nvm_version = mei_nvm->nvm_version; + + iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg); + if (data->valid_tx_ant) + tx_chains &= data->valid_tx_ant; + if (data->valid_rx_ant) + rx_chains &= data->valid_rx_ant; + + data->sku_cap_mimo_disabled = false; + data->sku_cap_band_24ghz_enable = true; + data->sku_cap_band_52ghz_enable = true; + data->sku_cap_11n_enable = + !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL); + data->sku_cap_11ac_enable = true; + data->sku_cap_11ax_enable = + mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT; + + data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT; + + data->n_hw_addrs = mei_nvm->n_hw_addrs; + /* If no valid mac address was found - bail out */ + if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) { + kfree(data); + return NULL; + } + + if (data->lar_enabled && + fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + + iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains, + sbands_flags, true, fw); + + return data; +} +IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data); + +struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, @@ -1548,7 +1611,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans, /* nvm file validation, dword_buff[2] holds the file version */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP && + trans->hw_rev_step == SILICON_C_STEP && le32_to_cpu(dword_buff[2]) < 0xE4A) { ret = -EFAULT; goto out; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index e1f5a9741850..e01f7751cf11 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2020 Intel Corporation + * Copyright (C) 2005-2015,
2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ @@ -8,6 +8,7 @@ #include <net/cfg80211.h> #include "iwl-eeprom-parse.h" +#include "mei/iwl-mei.h" /** * enum iwl_nvm_sbands_flags - modification flags for the channel profiles @@ -81,4 +82,12 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw); +/** + * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data + */ +struct iwl_nvm_data * +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_mei_nvm *mei_nvm, + const struct iwl_fw *fw); + #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index a84ab02cf9d7..95b3dae7b504 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -347,9 +347,7 @@ #define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 -#define WFPM_CTRL_REG_GEN2 0xd03030 #define WFPM_OTP_CFG1_ADDR 0x00a03098 -#define WFPM_OTP_CFG1_ADDR_GEN2 0x00d03098 #define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) #define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) @@ -455,6 +453,13 @@ enum { #define UREG_DOORBELL_TO_ISR6_RESUME BIT(19) #define UREG_DOORBELL_TO_ISR6_PNVM BIT(20) +/* + * From the BZ family on, the driver triggers this bit for suspend and resume. + * The driver should update CSR_IPC_SLEEP_CONTROL with the suspend/resume + * value before triggering this interrupt. + */ +#define UREG_DOORBELL_TO_ISR6_SLEEP_CTRL BIT(31) + #define CNVI_MBOX_C 0xA3400C #define FSEQ_ERROR_CODE 0xA340C8 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 4ebb1871bd1f..1bcaa3598785 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -193,7 +193,10 @@ enum iwl_error_event_table_status { IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0), IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1), IWL_ERROR_EVENT_TABLE_UMAC = BIT(2), - IWL_ERROR_EVENT_TABLE_TCM = BIT(3), + IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3), + IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4), + IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5), + IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6), }; /** @@ -296,6 +299,8 @@ enum iwl_d3_status { * are sent * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation + * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once, + * e.g.
for testing */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, @@ -308,6 +313,7 @@ enum iwl_trans_status { STATUS_TRANS_GOING_IDLE, STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, + STATUS_SUPPRESS_CMD_ERROR_ONCE, }; static inline int @@ -593,7 +599,7 @@ struct iwl_trans_ops { void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); - void (*sw_reset)(struct iwl_trans *trans); + int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership); bool (*grab_nic_access)(struct iwl_trans *trans); void (*release_nic_access)(struct iwl_trans *trans); void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, @@ -725,7 +731,8 @@ struct iwl_self_init_dram { * @trigger_tlv: array of pointers to triggers TLVs for debug * @lmac_error_event_table: addrs of lmacs error tables * @umac_error_event_table: addr of umac error table - * @tcm_error_event_table: address of TCM error table + * @tcm_error_event_table: address(es) of TCM error table(s) + * @rcm_error_event_table: address(es) of RCM error table(s) * @error_event_table_tlv_status: bitmap that indicates what error table * pointers was recevied via TLV. uses enum &iwl_error_event_table_status * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state @@ -752,7 +759,8 @@ struct iwl_trans_debug { u32 lmac_error_event_table[2]; u32 umac_error_event_table; - u32 tcm_error_event_table; + u32 tcm_error_event_table[2]; + u32 rcm_error_event_table[2]; unsigned int error_event_table_tlv_status; enum iwl_ini_cfg_state internal_ini_cfg; @@ -775,6 +783,8 @@ struct iwl_trans_debug { u32 domains_bitmap; u32 ucode_preset; + bool restart_required; + u32 last_tp_resetfw; }; struct iwl_dma_ptr { @@ -924,6 +934,7 @@ struct iwl_trans_txqs { /** * struct iwl_trans - transport common data * + * @csme_own - true if we couldn't get ownership on the device * @ops - pointer to iwl_trans_ops * @op_mode - pointer to the op_mode * @trans_cfg: the trans-specific configuration part @@ -937,6 +948,7 @@ struct iwl_trans_txqs { * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. + * @hw_rev_step: The mac step of the HW * @pm_support: set to true in start_hw if link pm is supported * @ltr_enabled: set to true if the LTR is enabled * @wide_cmd_header: true when ucode supports wide command header format @@ -958,6 +970,7 @@ struct iwl_trans_txqs { * @iwl_trans_txqs: transport tx queues data. 
*/ struct iwl_trans { + bool csme_own; const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg_trans_params *trans_cfg; @@ -969,6 +982,7 @@ struct iwl_trans { struct device *dev; u32 max_skb_frags; u32 hw_rev; + u32 hw_rev_step; u32 hw_rf_id; u32 hw_id; char hw_id_str[52]; @@ -1382,10 +1396,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) trans->ops->set_pmi(trans, state); } -static inline void iwl_trans_sw_reset(struct iwl_trans *trans) +static inline int iwl_trans_sw_reset(struct iwl_trans *trans, + bool retake_ownership) { if (trans->ops->sw_reset) - trans->ops->sw_reset(trans); + return trans->ops->sw_reset(trans, retake_ownership); + return 0; } static inline void diff --git a/drivers/net/wireless/intel/iwlwifi/mei/Makefile b/drivers/net/wireless/intel/iwlwifi/mei/Makefile new file mode 100644 index 000000000000..8e3ef0347db7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_IWLMEI) += iwlmei.o +iwlmei-y += main.o +iwlmei-y += net.o +iwlmei-$(CONFIG_IWLWIFI_DEVICE_TRACING) += trace.o +CFLAGS_trace.o := -I$(src) + +ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/internal.h b/drivers/net/wireless/intel/iwlwifi/mei/internal.h new file mode 100644 index 000000000000..92fea7dd71e2 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/internal.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#ifndef __IWLMEI_INTERNAL_H_ +#define __IWLMEI_INTERNAL_H_ + +#include <uapi/linux/if_ether.h> +#include <linux/netdevice.h> + +#include "sap.h" + +rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *skb, + const struct iwl_sap_oob_filters *filters, + bool *pass_to_csme); + +void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx); + +#endif /* __IWLMEI_INTERNAL_H_ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h new file mode 100644 index 000000000000..67122cfa2292 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#ifndef __iwl_mei_h__ +#define __iwl_mei_h__ + +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/ieee80211.h> + +/** + * DOC: Introduction + * + * iwlmei is the kernel module that is in charge of the communication between + * the iwlwifi driver and the CSME firmware's WLAN driver. This communication + * uses the SAP protocol defined in another file. + * iwlwifi can request or release ownership on the WiFi device through iwlmei. + * iwlmei may notify iwlwifi about certain events: what filter iwlwifi should + * use to pass inbound packets through to the CSME firmware, for example. iwlmei + * may also use iwlwifi to send traffic. This means that we need communication + * from iwlmei to iwlwifi and the other way around. + */ + +/** + * DOC: Life cycle + * + * iwlmei exports symbols that are needed by iwlwifi so that iwlmei will always + * be loaded when iwlwifi is alive. iwlwifi registers itself to iwlmei and + * provides the pointers to the functions that iwlmei calls whenever needed. + * iwlwifi calls iwlmei through direct and context-free function calls.
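+ *
+ * A minimal sketch of that registration direction, assuming an
+ * iwl_mei_register() entry point taking an ops table of callbacks (the
+ * callback and variable names here are illustrative only):
+ *
+ *	static const struct iwl_mei_ops mei_ops = {
+ *		.me_conn_status = opmode_me_conn_status,
+ *		.rfkill = opmode_rfkill,
+ *	};
+ *
+ *	ret = iwl_mei_register(op_mode, &mei_ops);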
+ +/** + * DOC: Memory layout + * + * Since iwlwifi calls iwlmei without any context, iwlmei needs to hold a + * global pointer to its data (which is in the mei client device's private + * data area). If there was no bind on the mei bus, this pointer is NULL and + * iwlmei knows not to access the CSME firmware when iwlwifi issues requests. + * + * iwlmei needs to cache requests from iwlwifi when there is no mei client + * device available (when iwlmei has been removed from the mei bus). In this + * case, all iwlmei's data that resides in the mei client device's private data + * area is unavailable. For this specific case, a separate caching area is + * needed. + */ + +/** + * DOC: Concurrency + * + * iwlwifi can call iwlmei at any time. iwlmei will take care to synchronize + * the calls from iwlwifi with its internal flows. iwlwifi must not call iwlmei + * in flows that cannot sleep. Moreover, iwlwifi must not call iwlmei in flows + * that originated from iwlmei. + */ + +/** + * DOC: Probe and remove from mei bus driver + * + * When the mei bus driver enumerates its devices, it calls iwlmei's probe + * function which will send the %SAP_ME_MSG_START message. The probe completes + * before the response (%SAP_ME_MSG_START_OK) is received. This response will + * be handled by the Rx path. Once it arrives, the connection to the CSME + * firmware is considered established and iwlwifi's requests can be served by + * the CSME firmware. + * + * When the mei bus driver removes the device, iwlmei loses all the data that + * was attached to the mei client device. It clears the global pointer to the + * mei client device since it is not available anymore. This will cause all the + * requests coming from iwlwifi to be cached. This flow takes the global mutex + * to be synchronized with all the requests coming from iwlwifi. + */
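The caching behavior spelled out in the Memory layout and Probe/remove sections is essentially "always write to the cache, talk to the firmware only when bound, replay the cache on (re)bind". A standalone sketch of that pattern with made-up names (in the real driver the replay is done by iwl_mei_set_init_conf(), further down in main.c):

#include <stdio.h>
#include <stdbool.h>

static bool bound;		  /* models the mei client device being bound */
static unsigned short mcc_cache;  /* models one field of iwl_mei_cache */

static void send_mcc(unsigned short mcc)
{
	printf("SAP: country code 0x%x\n", mcc);
}

/* Setter: always cache; only talk to the firmware when bound. */
static void set_country_code(unsigned short mcc)
{
	mcc_cache = mcc;
	if (bound)
		send_mcc(mcc);
}

/* Bind: replay the cached configuration. */
static void on_bind(void)
{
	bound = true;
	send_mcc(mcc_cache);
}

int main(void)
{
	set_country_code(0x3030);	/* cached only: no device yet */
	on_bind();			/* replayed on bind */
	return 0;
}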
+ +/** + * DOC: Driver load when CSME owns the device + * + * When the driver (iwlwifi) is loaded while CSME owns the device, + * it'll ask CSME to release the device through HW registers. CSME + * will release the device only in the case that there is no connection + * through the mei bus. If there is a mei bus connection, CSME will refuse + * to release the ownership on the device through the HW registers. In that + * case, iwlwifi must first request ownership using the SAP protocol. + * + * Once iwlwifi requests ownership through the SAP protocol, CSME will + * grant ownership of the device through the HW registers as well. + * In order to request ownership over SAP, we first need to have an interface + * which means that we need to register to mac80211. + * This can't happen before we get the NVM that contains all the capabilities + * of the device. Reading the NVM usually requires loading the firmware, but + * this is impossible as long as we don't have ownership on the device. + * In order to solve this chicken and egg problem, the host driver can get + * the NVM through CSME which owns the device. It can send + * %SAP_MSG_NOTIF_GET_NVM, which will be replied by %SAP_MSG_NOTIF_NVM with + * the NVM's content that the host driver needs. + */ + +/** + * DOC: CSME behavior regarding the ownership requests + * + * The ownership requests from the host can come in two different ways: + * - the HW registers in iwl_pcie_set_hw_ready + * - using the Software Arbitration Protocol (SAP) + * + * The host can ask CSME who owns the device with %SAP_MSG_NOTIF_WHO_OWNS_NIC, + * and it can request ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP. + * The host will first use %SAP_MSG_NOTIF_WHO_OWNS_NIC to know what state + * CSME is in. In case CSME thinks it owns the device, the host can ask for + * ownership with %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP. + * + * Here is the table that describes CSME's behavior upon an ownership request: + * + * +-------------------+------------+--------------+-----------------------------+------------+ + * | State | HW reg bit | Reply for | Event | HW reg bit | + * | | before | WHO_OWNS_NIC | | after | + * +===================+============+==============+=============================+============+ + * | WiAMT not | 0 | Host | HW register or | 0 | + * | operational | Host owner | | HOST_ASKS_FOR_NIC_OWNERSHIP | Host owner | + * +-------------------+------------+--------------+-----------------------------+------------+ + * | Operational & | 1 | N/A | HW register | 0 | + * | SAP down & | CSME owner | | | Host owner | + * | no session active | | | | | + * +-------------------+------------+--------------+-----------------------------+------------+ + * | Operational & | 1 | CSME | HW register | 1 | + * | SAP up | CSME owner | | | CSME owner | + * +-------------------+------------+--------------+-----------------------------+------------+ + * | Operational & | 1 | CSME | HOST_ASKS_FOR_NIC_OWNERSHIP | 0 | + * | SAP up | CSME owner | | | Host owner | + * +-------------------+------------+--------------+-----------------------------+------------+ + */ + +/** + * DOC: Driver load when CSME is associated and a session is active + * + * A "session" is active when CSME is associated to an access point and the + * link is used to attach a remote driver or to control the system remotely. + * When a session is active, we want to make sure it won't disconnect when we + * take ownership on the device. + * In this case, the driver can get the device, but it'll need to make + * sure that it'll connect to the exact same AP (same BSSID). + * In order to do so, CSME will send the connection parameters through + * SAP and then the host can check if it can connect to this same AP. + * If yes, it can request ownership through SAP and connect quickly without + * scanning all the channels, but just probing the AP on the channel that + * CSME was connected to. + * In order to signal this specific scenario to iwlwifi, iwlmei will + * immediately require iwlwifi to report RF-Kill to the network stack. This + * RF-Kill will prevent the stack from getting the device, and it has a reason + * that tells the userspace that the device is in RF-Kill because it is not + * owned by the host. Once the userspace has configured the right profile, + * it'll be able to let iwlmei know that it can request ownership over SAP + * which will remove the RF-Kill, and finally allow the host to connect. + * The host then has 3 seconds to connect (including DHCP). If the host + * fails to connect within those 3 seconds, CSME takes the device back. + */
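The ownership table above can be read as a small decision function. This standalone sketch encodes its four rows (the active-session case changes how the host reconnects, not who ends up owning the NIC; a SAP request is only possible when SAP is up):

#include <stdio.h>
#include <stdbool.h>

enum owner { OWNER_HOST, OWNER_CSME };

/* Who owns the NIC after a host request, per the table above. */
static enum owner owner_after_request(bool wiamt_operational, bool sap_up,
				      bool via_sap_request)
{
	if (!wiamt_operational)
		return OWNER_HOST;	/* host owns from the start */
	if (!sap_up)
		return OWNER_HOST;	/* the HW register is enough */
	/* operational & SAP up: only HOST_ASKS_FOR_NIC_OWNERSHIP works */
	return via_sap_request ? OWNER_HOST : OWNER_CSME;
}

int main(void)
{
	printf("operational + SAP up, HW reg only -> %s\n",
	       owner_after_request(true, true, false) == OWNER_HOST ?
	       "host" : "CSME");
	printf("operational + SAP up, SAP request -> %s\n",
	       owner_after_request(true, true, true) == OWNER_HOST ?
	       "host" : "CSME");
	return 0;
}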
+ +/** + * DOC: Datapath + * + * CSME can transmit packets through the netdev that it gets from the wifi + * driver. It'll send packets in the 802.3 format and simply call + * dev_queue_xmit(). + * + * For Rx, iwlmei registers an Rx handler that it attaches to the netdev. iwlmei + * may catch packets and send them to CSME; it can then either drop them so + * that they are invisible to user space, or let them go to the user space. + * + * Packets transmitted by the user space do not need to be forwarded to CSME + * with the exception of the DHCP request. In order to know what IP is used + * by the user space, CSME needs to get the DHCP request. See + * iwl_mei_tx_copy_to_csme(). + */ + +/** + * enum iwl_mei_nvm_caps - capabilities for MEI NVM + * @MEI_NVM_CAPS_LARI_SUPPORT: Lari is supported + * @MEI_NVM_CAPS_11AX_SUPPORT: 11AX is supported + */ +enum iwl_mei_nvm_caps { + MEI_NVM_CAPS_LARI_SUPPORT = BIT(0), + MEI_NVM_CAPS_11AX_SUPPORT = BIT(1), +}; + +/** + * struct iwl_mei_nvm - used to pass the NVM from CSME + * @hw_addr: The MAC address + * @n_hw_addrs: The number of MAC addresses + * @reserved: For alignment. + * @radio_cfg: The radio configuration. + * @caps: See &enum iwl_mei_nvm_caps. + * @nvm_version: The version of the NVM. + * @channels: The data for each channel. + * + * If a field is added, it must correspond to the SAP structure. + */ +struct iwl_mei_nvm { + u8 hw_addr[ETH_ALEN]; + u8 n_hw_addrs; + u8 reserved; + u32 radio_cfg; + u32 caps; + u32 nvm_version; + u32 channels[110]; +}; + +/** + * enum iwl_mei_pairwise_cipher - cipher for UCAST key + * @IWL_MEI_CIPHER_NONE: none + * @IWL_MEI_CIPHER_CCMP: ccmp + * @IWL_MEI_CIPHER_GCMP: gcmp + * @IWL_MEI_CIPHER_GCMP_256: gcmp 256 + * + * Note that those values are dictated by the CSME firmware API (see sap.h) + */ +enum iwl_mei_pairwise_cipher { + IWL_MEI_CIPHER_NONE = 0, + IWL_MEI_CIPHER_CCMP = 4, + IWL_MEI_CIPHER_GCMP = 8, + IWL_MEI_CIPHER_GCMP_256 = 9, +}; + +/** + * enum iwl_mei_akm_auth - a combination of AKM and AUTH method + * @IWL_MEI_AKM_AUTH_OPEN: No encryption + * @IWL_MEI_AKM_AUTH_RSNA: 1X profile + * @IWL_MEI_AKM_AUTH_RSNA_PSK: PSK profile + * @IWL_MEI_AKM_AUTH_SAE: SAE profile + * + * Note that those values are dictated by the CSME firmware API (see sap.h) + */ +enum iwl_mei_akm_auth { + IWL_MEI_AKM_AUTH_OPEN = 0, + IWL_MEI_AKM_AUTH_RSNA = 6, + IWL_MEI_AKM_AUTH_RSNA_PSK = 7, + IWL_MEI_AKM_AUTH_SAE = 9, +}; + +/** + * struct iwl_mei_conn_info - connection info + * @lp_state: link protection state + * @auth_mode: authentication mode + * @ssid_len: the length of SSID + * @ssid: the SSID + * @pairwise_cipher: the cipher used for unicast packets + * @channel: the associated channel + * @band: the associated band + * @bssid: the BSSID + */ +struct iwl_mei_conn_info { + u8 lp_state; + u8 auth_mode; + u8 ssid_len; + u8 channel; + u8 band; + u8 pairwise_cipher; + u8 bssid[ETH_ALEN]; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +}; + +/** + * struct iwl_mei_colloc_info - collocated AP info + * @channel: the channel of the collocated AP + * @bssid: the BSSID of the collocated AP + */ +struct iwl_mei_colloc_info { + u8 channel; + u8 bssid[ETH_ALEN]; +};
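As a concrete illustration of the connection-info layout, the sketch below fills a userspace mirror of struct iwl_mei_conn_info for a hypothetical WPA2-PSK association on channel 6. The mirror struct, the SSID/BSSID values and the band value are made up for the example; the auth and cipher numbers are the enum values above:

#include <stdio.h>
#include <string.h>

#define DEMO_ETH_ALEN 6
#define DEMO_MAX_SSID_LEN 32

/* Mirrors struct iwl_mei_conn_info for illustration only. */
struct demo_conn_info {
	unsigned char lp_state;
	unsigned char auth_mode;
	unsigned char ssid_len;
	unsigned char channel;
	unsigned char band;
	unsigned char pairwise_cipher;
	unsigned char bssid[DEMO_ETH_ALEN];
	unsigned char ssid[DEMO_MAX_SSID_LEN];
};

int main(void)
{
	static const unsigned char bssid[DEMO_ETH_ALEN] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct demo_conn_info ci = {
		.auth_mode = 7,		/* IWL_MEI_AKM_AUTH_RSNA_PSK */
		.pairwise_cipher = 4,	/* IWL_MEI_CIPHER_CCMP */
		.channel = 6,
		.band = 0,		/* illustrative band value */
		.ssid_len = 7,
	};

	memcpy(ci.ssid, "MyIoTAP", ci.ssid_len);
	memcpy(ci.bssid, bssid, DEMO_ETH_ALEN);
	printf("ssid_len=%u channel=%u auth=%u cipher=%u\n",
	       ci.ssid_len, ci.channel, ci.auth_mode, ci.pairwise_cipher);
	return 0;
}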
+ +/* + * struct iwl_mei_ops - driver's operations called by iwlmei + * Operations will not be called more than once concurrently. + * It's not allowed to call iwlmei functions from this context. + * + * @me_conn_status: provide information about CSME's current connection. + * @rfkill: called when the wifi driver should report a change in the rfkill + * status. + * @roaming_forbidden: indicates whether roaming is forbidden. + * @sap_connected: indicate that SAP is now connected. Will be called in case + * the wifi driver registered to iwlmei before SAP connection succeeded or + * when the SAP connection is re-established. + * @nic_stolen: this means that the device is no longer available. The device + * can still be used until the callback returns. + */ +struct iwl_mei_ops { + void (*me_conn_status)(void *priv, + const struct iwl_mei_conn_info *conn_info); + void (*rfkill)(void *priv, bool blocked); + void (*roaming_forbidden)(void *priv, bool forbidden); + void (*sap_connected)(void *priv); + void (*nic_stolen)(void *priv); +}; + +#if IS_ENABLED(CONFIG_IWLMEI) + +/** + * iwl_mei_is_connected() - is the connection to the CSME firmware established? + * + * Return: true if we have a SAP connection + */ +bool iwl_mei_is_connected(void); + +/** + * iwl_mei_get_nvm() - returns the NVM for the device + * + * It is the caller's responsibility to free the memory returned + * by this function. + * This function blocks (sleeps) until the NVM is ready. + * + * Return: the NVM as received from CSME + */ +struct iwl_mei_nvm *iwl_mei_get_nvm(void); + +/** + * iwl_mei_get_ownership() - request ownership + * + * This function blocks until ownership is granted or the timeout expires. + * + * Return: 0 in case we could get ownership on the device + */ +int iwl_mei_get_ownership(void); + +/** + * iwl_mei_set_rfkill_state() - set SW and HW RF kill states + * @hw_rfkill: HW RF kill state. + * @sw_rfkill: SW RF kill state. + * + * This function must be called when SW RF kill is issued by the user. + */ +void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill); + +/** + * iwl_mei_set_nic_info() - set the MAC addresses + * @mac_address: MAC address to set + * @nvm_address: NVM MAC address to set + * + * This function must be called upon MAC address change. + */ +void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address); + +/** + * iwl_mei_set_country_code() - set new country code + * @mcc: the new applied MCC + * + * This function must be called upon country code update + */ +void iwl_mei_set_country_code(u16 mcc); + +/** + * iwl_mei_set_power_limit() - set TX power limit + * @power_limit: pointer to an array of 10 elements (le16) representing the + * power restrictions per chain. + * + * This function must be called upon power restrictions change + */ +void iwl_mei_set_power_limit(const __le16 *power_limit); + +/** + * iwl_mei_register() - register the wifi driver to iwlmei + * @priv: a pointer to the wifi driver's context. Cannot be NULL. + * @ops: the ops structure. + * + * Return: 0 unless something went wrong. It is illegal to call any + * other API function before this function is called and succeeds. + * + * Only one wifi driver instance (wifi device instance really) + * can register at a time. + */ +int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops); + +/** + * iwl_mei_start_unregister() - unregister the wifi driver from iwlmei + * + * From this point on, iwlmei will no longer use the callbacks provided by + * the driver, but the device is still usable.
+ */ +void iwl_mei_start_unregister(void); + +/** + * iwl_mei_unregister_complete() - complete the unregistration + * + * Must be called after iwl_mei_start_unregister. When this function returns, + * the device is owned by CSME. + */ +void iwl_mei_unregister_complete(void); + +/** + * iwl_mei_set_netdev() - sets the netdev for Tx / Rx. + * @netdev: the net_device + * + * The caller should set the netdev to a non-NULL value when the + * interface is added. Packets might be sent to the driver immediately + * afterwards. + * The caller should set the netdev to NULL when the interface is removed. + * This function will call synchronize_net() after setting the netdev to NULL. + * Only when this function returns, can the caller assume that iwlmei will + * no longer inject packets into the netdev's Tx path. + * + * Context: This function can sleep and assumes rtnl_lock is taken. + * The netdev must be set to NULL before iwl_mei_start_unregister() is called. + */ +void iwl_mei_set_netdev(struct net_device *netdev); + +/** + * iwl_mei_tx_copy_to_csme() - must be called for each packet sent by + * the wifi driver. + * @skb: the skb sent + * @ivlen: the size of the IV that needs to be skipped after the MAC and + * before the SNAP header. + * + * This function doesn't take any lock, it simply tries to catch DHCP + * packets sent by the wifi driver. If the packet is a DHCP packet, it + * will send it to CSME. This function must not be called for virtual + * interfaces that are not monitored by CSME, meaning it must be called + * only for packets transmitted by the netdevice that was registered + * with iwl_mei_set_netdev(). + */ +void iwl_mei_tx_copy_to_csme(struct sk_buff *skb, unsigned int ivlen); + +/** + * iwl_mei_host_associated() - must be called when iwlwifi associated. + * @conn_info: pointer to the connection info structure. + * @colloc_info: pointer to the collocated AP info. This is relevant only in + * case of UHB associated AP, otherwise set to NULL. + */ +void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, + const struct iwl_mei_colloc_info *colloc_info); + +/** + * iwl_mei_host_disassociated() - must be called when iwlwifi disassociated. 
+ */ +void iwl_mei_host_disassociated(void); + +/** + * iwl_mei_device_down() - must be called when the device is down + */ +void iwl_mei_device_down(void); + +#else + +static inline bool iwl_mei_is_connected(void) +{ return false; } + +static inline struct iwl_mei_nvm *iwl_mei_get_nvm(void) +{ return NULL; } + +static inline int iwl_mei_get_ownership(void) +{ return 0; } + +static inline void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) +{} + +static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address) +{} + +static inline void iwl_mei_set_country_code(u16 mcc) +{} + +static inline void iwl_mei_set_power_limit(const __le16 *power_limit) +{} + +static inline int iwl_mei_register(void *priv, + const struct iwl_mei_ops *ops) +{ return 0; } + +static inline void iwl_mei_start_unregister(void) +{} + +static inline void iwl_mei_unregister_complete(void) +{} + +static inline void iwl_mei_set_netdev(struct net_device *netdev) +{} + +static inline void iwl_mei_tx_copy_to_csme(struct sk_buff *skb, + unsigned int ivlen) +{} + +static inline void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, + const struct iwl_mei_colloc_info *colloc_info) +{} + +static inline void iwl_mei_host_disassociated(void) +{} + +static inline void iwl_mei_device_down(void) +{} + +#endif /* CONFIG_IWLMEI */ + +#endif /* __iwl_mei_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c new file mode 100644 index 000000000000..d9733aaf6f6e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -0,0 +1,2001 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/ieee80211.h> +#include <linux/rtnetlink.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mei_cl_bus.h> +#include <linux/rcupdate.h> +#include <linux/debugfs.h> +#include <linux/skbuff.h> +#include <linux/wait.h> +#include <linux/slab.h> +#include <linux/mm.h> + +#include <net/cfg80211.h> + +#include "internal.h" +#include "iwl-mei.h" +#include "trace.h" +#include "trace-data.h" +#include "sap.h" + +MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface"); +MODULE_LICENSE("GPL"); + +#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \ + 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65) + +/* + * Since iwlwifi calls iwlmei without any context, hold a pointer to the + * mei_cl_device structure here. + * Define a mutex that will synchronize all the flows between iwlwifi and + * iwlmei. + * Note that iwlmei can't have several instances, so it is ok to have static + * variables here. + */ +static struct mei_cl_device *iwl_mei_global_cldev; +static DEFINE_MUTEX(iwl_mei_mutex); +static unsigned long iwl_mei_status; + +enum iwl_mei_status_bits { + IWL_MEI_STATUS_SAP_CONNECTED, +}; + +bool iwl_mei_is_connected(void) +{ + return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); +} +EXPORT_SYMBOL_GPL(iwl_mei_is_connected); + +#define SAP_VERSION 3 +#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */
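SAP_CONTROL_BLOCK_ID is the bytes 'S', 'A', 'P', '!' read as a little-endian 32-bit value, which is what makes the marker easy to spot in a memory dump. A quick standalone check (assumes a little-endian host, as the in-memory layout comment implies):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t id = 0x21504153;	/* SAP_CONTROL_BLOCK_ID */
	unsigned char bytes[4];

	/* On a little-endian host this reproduces the in-memory layout. */
	memcpy(bytes, &id, sizeof(bytes));
	printf("%c%c%c%c\n", bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}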
+ +struct iwl_sap_q_ctrl_blk { + __le32 wr_ptr; + __le32 rd_ptr; + __le32 size; +}; + +enum iwl_sap_q_idx { + SAP_QUEUE_IDX_NOTIF = 0, + SAP_QUEUE_IDX_DATA, + SAP_QUEUE_IDX_MAX, +}; + +struct iwl_sap_dir { + __le32 reserved; + struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX]; +}; + +enum iwl_sap_dir_idx { + SAP_DIRECTION_HOST_TO_ME = 0, + SAP_DIRECTION_ME_TO_HOST, + SAP_DIRECTION_MAX, +}; + +struct iwl_sap_shared_mem_ctrl_blk { + __le32 sap_id; + __le32 size; + struct iwl_sap_dir dir[SAP_DIRECTION_MAX]; +}; + +/* + * The shared area has the following layout: + * + * +-----------------------------------+ + * |struct iwl_sap_shared_mem_ctrl_blk | + * +-----------------------------------+ + * |Host -> ME data queue | + * +-----------------------------------+ + * |Host -> ME notif queue | + * +-----------------------------------+ + * |ME -> Host data queue | + * +-----------------------------------+ + * |ME -> host notif queue | + * +-----------------------------------+ + * |SAP control block id (SAP!) | + * +-----------------------------------+ + */ + +#define SAP_H2M_DATA_Q_SZ 48256 +#define SAP_M2H_DATA_Q_SZ 24128 +#define SAP_H2M_NOTIF_Q_SZ 2240 +#define SAP_M2H_NOTIF_Q_SZ 62720 + +#define _IWL_MEI_SAP_SHARED_MEM_SZ \ + (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ + SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \ + SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) + +#define IWL_MEI_SAP_SHARED_MEM_SZ \ + (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE)) + +struct iwl_mei_shared_mem_ptrs { + struct iwl_sap_shared_mem_ctrl_blk *ctrl; + void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX]; + size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX]; +}; + +struct iwl_mei_filters { + struct rcu_head rcu_head; + struct iwl_sap_oob_filters filters; +};
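The shared-area size is plain arithmetic over the layout above. This standalone sketch mirrors the three control-block structs (same sizes on common ABIs, with uint32_t standing in for __le32) and reproduces the _IWL_MEI_SAP_SHARED_MEM_SZ computation, including the trailing 4-byte "SAP!" marker and the page roundup:

#include <stdio.h>
#include <stdint.h>

/* Userspace mirrors of the control-block structs above. */
struct q_ctrl_blk { uint32_t wr_ptr, rd_ptr, size; };
struct sap_dir    { uint32_t reserved; struct q_ctrl_blk q[2]; };
struct ctrl_blk   { uint32_t sap_id, size; struct sap_dir dir[2]; };

#define H2M_DATA_Q_SZ  48256
#define M2H_DATA_Q_SZ  24128
#define H2M_NOTIF_Q_SZ  2240
#define M2H_NOTIF_Q_SZ 62720
#define PAGE_SZ 4096

int main(void)
{
	/* control block + 4 queues + trailing 4-byte "SAP!" marker */
	size_t raw = sizeof(struct ctrl_blk) +
		     H2M_DATA_Q_SZ + H2M_NOTIF_Q_SZ +
		     M2H_DATA_Q_SZ + M2H_NOTIF_Q_SZ + 4;
	size_t rounded = (raw + PAGE_SZ - 1) / PAGE_SZ * PAGE_SZ;

	printf("raw = %zu bytes, page-rounded = %zu bytes\n", raw, rounded);
	return 0;
}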
+ +/** + * struct iwl_mei - holds the private data for iwl_mei + * + * @get_nvm_wq: the wait queue for the get_nvm flow + * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA + * message. Used so that we can send CHECK_SHARED_AREA from atomic + * contexts. + * @get_ownership_wq: the wait queue for the get_ownership flow + * @shared_mem: the memory that is shared between CSME and the host + * @cldev: the pointer to the MEI client device + * @nvm: the data returned by the CSME for the NVM + * @filters: the filters sent by CSME + * @got_ownership: true if we own the device + * @amt_enabled: true if CSME has wireless enabled + * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI + * bus, but rather need to wait until send_csa_msg_wk runs + * @csme_taking_ownership: true when CSME is taking ownership. Used to remember + * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down + * flow. + * @csa_throttle_end_wk: used when &csa_throttled is true + * @data_q_lock: protects the access to the data queues which are + * accessed without the mutex. + * @sap_seq_no: the sequence number for the SAP messages + * @seq_no: the sequence number for the ME messages + * @dbgfs_dir: the debugfs dir entry + */ +struct iwl_mei { + wait_queue_head_t get_nvm_wq; + struct work_struct send_csa_msg_wk; + wait_queue_head_t get_ownership_wq; + struct iwl_mei_shared_mem_ptrs shared_mem; + struct mei_cl_device *cldev; + struct iwl_mei_nvm *nvm; + struct iwl_mei_filters __rcu *filters; + bool got_ownership; + bool amt_enabled; + bool csa_throttled; + bool csme_taking_ownership; + struct delayed_work csa_throttle_end_wk; + spinlock_t data_q_lock; + + atomic_t sap_seq_no; + atomic_t seq_no; + + struct dentry *dbgfs_dir; +}; + +/** + * struct iwl_mei_cache - cache for the parameters from iwlwifi + * @ops: Callbacks to iwlwifi. + * @netdev: The netdev that will be used to transmit / receive packets. + * @conn_info: The connection info message triggered by iwlwifi's association. + * @power_limit: pointer to an array of 10 elements (le16) representing the + * power restrictions per chain. + * @rf_kill: rf kill state. + * @mcc: MCC info + * @mac_address: interface MAC address. + * @nvm_address: NVM MAC address. + * @priv: A pointer to iwlwifi. + * + * This is used to cache the configuration coming from iwlwifi. The data + * is cached here so that we can buffer the configuration even if we don't have + * a binding on the mei bus and hence no iwl_mei structure. + */ +struct iwl_mei_cache { + const struct iwl_mei_ops *ops; + struct net_device __rcu *netdev; + const struct iwl_sap_notif_connection_info *conn_info; + const __le16 *power_limit; + u32 rf_kill; + u16 mcc; + u8 mac_address[6]; + u8 nvm_address[6]; + void *priv; +}; + +static struct iwl_mei_cache iwl_mei_cache = { + .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED +}; + +static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + if (mei_cldev_dma_unmap(cldev)) + dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n"); + memset(&mei->shared_mem, 0, sizeof(mei->shared_mem)); +} + +#define HBM_DMA_BUF_ID_WLAN 1 + +static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; + + mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, + IWL_MEI_SAP_SHARED_MEM_SZ); + + if (IS_ERR(mem->ctrl)) { + int ret = PTR_ERR(mem->ctrl); + + dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", + ret); + mem->ctrl = NULL; + + return ret; + } + + memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ); + + return 0; +} + +static void iwl_mei_init_shared_mem(struct iwl_mei *mei) +{ + struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; + struct iwl_sap_dir *h2m; + struct iwl_sap_dir *m2h; + int dir, queue; + u8 *q_head; + + mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID); + + mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl)); + + h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; + + h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = + cpu_to_le32(SAP_H2M_DATA_Q_SZ); + h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = + cpu_to_le32(SAP_H2M_NOTIF_Q_SZ); + m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = + cpu_to_le32(SAP_M2H_DATA_Q_SZ); + m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = + cpu_to_le32(SAP_M2H_NOTIF_Q_SZ); + + /* q_head points to the start of the first queue */ + q_head = (void *)(mem->ctrl + 1); + + /* Initialize the queue heads */ + for (dir = 0; dir <
SAP_DIRECTION_MAX; dir++) { + for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) { + mem->q_head[dir][queue] = q_head; + q_head += + le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); + mem->q_size[dir][queue] = + le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); + } + } + + *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID); +} + +static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev, + struct iwl_sap_q_ctrl_blk *notif_q, + u8 *q_head, + const struct iwl_sap_hdr *hdr, + u32 q_sz) +{ + u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); + u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); + size_t room_in_buf; + size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len); + + if (rd > q_sz || wr > q_sz) { + dev_err(&cldev->dev, + "Pointers are past the end of the buffer\n"); + return -EINVAL; + } + + room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr; + + /* we don't have enough room for the data to write */ + if (room_in_buf < tx_sz) { + dev_err(&cldev->dev, + "Not enough room in the buffer\n"); + return -ENOSPC; + } + + if (wr + tx_sz <= q_sz) { + memcpy(q_head + wr, hdr, tx_sz); + } else { + memcpy(q_head + wr, hdr, q_sz - wr); + memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr)); + } + + WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); + return 0; +} + +static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei) +{ + struct iwl_sap_q_ctrl_blk *notif_q; + struct iwl_sap_dir *dir; + + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; + + if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr)) + return true; + + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; + return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr); +} + +static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_me_msg_start msg = { + .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA), + .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), + }; + int ret; + + lockdep_assert_held(&iwl_mei_mutex); + + if (mei->csa_throttled) + return 0; + + trace_iwlmei_me_msg(&msg.hdr, true); + ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg)); + if (ret != sizeof(msg)) { + dev_err(&cldev->dev, + "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n", + ret); + return ret; + } + + mei->csa_throttled = true; + + schedule_delayed_work(&mei->csa_throttle_end_wk, + msecs_to_jiffies(100)); + + return 0; +} + +static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk) +{ + struct iwl_mei *mei = + container_of(wk, struct iwl_mei, csa_throttle_end_wk.work); + + mutex_lock(&iwl_mei_mutex); + + mei->csa_throttled = false; + + if (iwl_mei_host_to_me_data_pending(mei)) + iwl_mei_send_check_shared_area(mei->cldev); + + mutex_unlock(&iwl_mei_mutex); +} + +static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev, + struct iwl_sap_hdr *hdr) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_q_ctrl_blk *notif_q; + struct iwl_sap_dir *dir; + void *q_head; + u32 q_sz; + int ret; + + lockdep_assert_held(&iwl_mei_mutex); + + if (!mei->shared_mem.ctrl) { + dev_err(&cldev->dev, + "No shared memory, can't send any SAP message\n"); + return -EINVAL; + } + + if (!iwl_mei_is_connected()) { + dev_err(&cldev->dev, + "Can't send a SAP message if we're not connected\n"); + return -ENODEV; + } + + hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); + dev_dbg(&cldev->dev, "Sending 
%d\n", hdr->type); + + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF]; + q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF]; + ret = iwl_mei_write_cyclic_buf(q_head, notif_q, q_head, hdr, q_sz); + + if (ret < 0) + return ret; + + trace_iwlmei_sap_cmd(hdr, true); + + return iwl_mei_send_check_shared_area(cldev); +} + +void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx) +{ + struct iwl_sap_q_ctrl_blk *notif_q; + struct iwl_sap_dir *dir; + struct iwl_mei *mei; + size_t room_in_buf; + size_t tx_sz; + size_t hdr_sz; + u32 q_sz; + u32 rd; + u32 wr; + void *q_head; + + if (!iwl_mei_global_cldev) + return; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + /* + * We access this path for Rx packets (the more common case) + * and from Tx path when we send DHCP packets, the latter is + * very unlikely. + * Take the lock already here to make sure we see that remove() + * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit. + */ + spin_lock_bh(&mei->data_q_lock); + + if (!iwl_mei_is_connected()) { + spin_unlock_bh(&mei->data_q_lock); + return; + } + + /* + * We are in a RCU critical section and the remove from the CSME bus + * which would free this memory waits for the readers to complete (this + * is done in netdev_rx_handler_unregister). + */ + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; + q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; + + rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); + wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); + hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) : + sizeof(struct iwl_sap_hdr); + tx_sz = skb->len + hdr_sz; + + if (rd > q_sz || wr > q_sz) { + dev_err(&mei->cldev->dev, + "can't write the data: pointers are past the end of the buffer\n"); + goto out; + } + + room_in_buf = wr >= rd ? 
+ +void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx) +{ + struct iwl_sap_q_ctrl_blk *notif_q; + struct iwl_sap_dir *dir; + struct iwl_mei *mei; + size_t room_in_buf; + size_t tx_sz; + size_t hdr_sz; + u32 q_sz; + u32 rd; + u32 wr; + void *q_head; + + if (!iwl_mei_global_cldev) + return; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + /* + * We access this path for Rx packets (the more common case) + * and from the Tx path when we send DHCP packets; the latter is + * very unlikely. + * Take the lock already here to make sure we see that remove() + * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit. + */ + spin_lock_bh(&mei->data_q_lock); + + if (!iwl_mei_is_connected()) { + spin_unlock_bh(&mei->data_q_lock); + return; + } + + /* + * We are in an RCU critical section and the remove from the CSME bus + * which would free this memory waits for the readers to complete (this + * is done in netdev_rx_handler_unregister). + */ + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; + q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; + + rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); + wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); + hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) : + sizeof(struct iwl_sap_hdr); + tx_sz = skb->len + hdr_sz; + + if (rd > q_sz || wr > q_sz) { + dev_err(&mei->cldev->dev, + "can't write the data: pointers are past the end of the buffer\n"); + goto out; + } + + room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr; + + /* we don't have enough room for the data to write */ + if (room_in_buf < tx_sz) { + dev_err(&mei->cldev->dev, + "Not enough room in the buffer for this data\n"); + goto out; + } + + if (skb_headroom(skb) < hdr_sz) { + dev_err(&mei->cldev->dev, + "Not enough headroom in the skb to write the SAP header\n"); + goto out; + } + + if (cb_tx) { + struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr)); + + cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET); + cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr)); + cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); + cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX)); + cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr)); + trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP); + } else { + struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr)); + + hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET); + hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); + hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); + trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR); + } + + if (wr + tx_sz <= q_sz) { + skb_copy_bits(skb, 0, q_head + wr, tx_sz); + } else { + skb_copy_bits(skb, 0, q_head + wr, q_sz - wr); + skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr)); + } + + WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); + +out: + spin_unlock_bh(&mei->data_q_lock); +} + +static int +iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type) +{ + struct iwl_sap_hdr msg = { + .type = cpu_to_le16(type), + }; + + return iwl_mei_send_sap_msg_payload(cldev, &msg); +} + +static void iwl_mei_send_csa_msg_wk(struct work_struct *wk) +{ + struct iwl_mei *mei = + container_of(wk, struct iwl_mei, send_csa_msg_wk); + + if (!iwl_mei_is_connected()) + return; + + mutex_lock(&iwl_mei_mutex); + + iwl_mei_send_check_shared_area(mei->cldev); + + mutex_unlock(&iwl_mei_mutex); +} + +/* Called in an RCU read critical section from netif_receive_skb */ +static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct iwl_mei *mei = + rcu_dereference(skb->dev->rx_handler_data); + struct iwl_mei_filters *filters = rcu_dereference(mei->filters); + bool rx_for_csme = false; + rx_handler_result_t res; + + /* + * remove() unregisters this handler and calls synchronize_net(), so + * this should never happen. + */ + if (!iwl_mei_is_connected()) { + dev_err(&mei->cldev->dev, + "Got an Rx packet, but we're not connected to SAP?\n"); + return RX_HANDLER_PASS; + } + + if (filters) + res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme); + else + res = RX_HANDLER_PASS; + + /* + * The data is already on the ring of the shared area; all we + * need to do is to tell the CSME firmware to check what we have + * there.
+ */ + if (rx_for_csme) + schedule_work(&mei->send_csa_msg_wk); + + if (res != RX_HANDLER_PASS) { + trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR); + dev_kfree_skb(skb); + } + + return res; +} + +static void +iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev, + const struct iwl_sap_me_msg_start_ok *rsp, + ssize_t len) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + if (len != sizeof(*rsp)) { + dev_err(&cldev->dev, + "got invalid SAP_ME_MSG_START_OK from CSME firmware\n"); + dev_err(&cldev->dev, + "size is incorrect: %zd instead of %zu\n", + len, sizeof(*rsp)); + return; + } + + if (rsp->supported_version != SAP_VERSION) { + dev_err(&cldev->dev, + "didn't get the expected version: got %d\n", + rsp->supported_version); + return; + } + + mutex_lock(&iwl_mei_mutex); + set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); + /* wifi driver has registered already */ + if (iwl_mei_cache.ops) { + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_WIFIDR_UP); + iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv); + } + + mutex_unlock(&iwl_mei_mutex); +} + +static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev, + const struct iwl_sap_csme_filters *filters) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + struct iwl_mei_filters *new_filters; + struct iwl_mei_filters *old_filters; + + old_filters = + rcu_dereference_protected(mei->filters, + lockdep_is_held(&iwl_mei_mutex)); + + new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL); + if (!new_filters) + return; + + /* Copy the OOB filters */ + new_filters->filters = filters->filters; + + rcu_assign_pointer(mei->filters, new_filters); + + if (old_filters) + kfree_rcu(old_filters, rcu_head); +} + +static void +iwl_mei_handle_conn_status(struct mei_cl_device *cldev, + const struct iwl_sap_notif_conn_status *status) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_mei_conn_info conn_info = { + .lp_state = le32_to_cpu(status->link_prot_state), + .ssid_len = le32_to_cpu(status->conn_info.ssid_len), + .channel = status->conn_info.channel, + .band = status->conn_info.band, + .auth_mode = le32_to_cpu(status->conn_info.auth_mode), + .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher), + }; + + if (!iwl_mei_cache.ops || + conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid)) + return; + + memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len); + ether_addr_copy(conn_info.bssid, status->conn_info.bssid); + + iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info); + + /* + * Update the Rfkill state in case the host does not own the device: + * if we are in Link Protection, ask to not touch the device, else, + * unblock rfkill. + * If the host owns the device, inform the user space whether it can + * roam. 
+ */ + if (mei->got_ownership) + iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv, + status->link_prot_state); + else + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, + status->link_prot_state); +} + +static void iwl_mei_set_init_conf(struct iwl_mei *mei) +{ + struct iwl_sap_notif_host_link_up link_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), + .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)), + }; + struct iwl_sap_notif_country_code mcc_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), + .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)), + .mcc = cpu_to_le16(iwl_mei_cache.mcc), + }; + struct iwl_sap_notif_sar_limits sar_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), + .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)), + }; + struct iwl_sap_notif_host_nic_info nic_info_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), + .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)), + }; + struct iwl_sap_msg_dw rfkill_msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), + .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)), + .val = cpu_to_le32(iwl_mei_cache.rf_kill), + }; + + iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC); + + if (iwl_mei_cache.conn_info) { + link_msg.conn_info = *iwl_mei_cache.conn_info; + iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr); + } + + iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr); + + if (iwl_mei_cache.power_limit) { + memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit, + sizeof(sar_msg.sar_chain_info_table)); + iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr); + } + + ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address); + ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address); + iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr); + + iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr); +} + +static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev, + const struct iwl_sap_msg_dw *dw) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct net_device *netdev; + + /* + * First take rtnl and only then the mutex to avoid an ABBA + * with iwl_mei_set_netdev() + */ + rtnl_lock(); + mutex_lock(&iwl_mei_mutex); + + netdev = rcu_dereference_protected(iwl_mei_cache.netdev, + lockdep_is_held(&iwl_mei_mutex)); + + if (mei->amt_enabled == !!le32_to_cpu(dw->val)) + goto out; + + mei->amt_enabled = dw->val; + + if (mei->amt_enabled) { + if (netdev) + netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei); + + iwl_mei_set_init_conf(mei); + } else { + if (iwl_mei_cache.ops) + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); + if (netdev) + netdev_rx_handler_unregister(netdev); + } + +out: + mutex_unlock(&iwl_mei_mutex); + rtnl_unlock(); +} + +static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev, + const struct iwl_sap_msg_dw *dw) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME); +} + +static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev, + const void *payload) +{ + /* We can get ownership and driver is registered, go ahead */ + if (iwl_mei_cache.ops) + iwl_mei_send_sap_msg(cldev, + SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP); +} + +static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev, + const void *payload) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + dev_info(&cldev->dev, "CSME 
takes ownership\n"); + + mei->got_ownership = false; + + /* + * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver + * is finished taking the device down. + */ + mei->csme_taking_ownership = true; + + if (iwl_mei_cache.ops) + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true); +} + +static void iwl_mei_handle_nvm(struct mei_cl_device *cldev, + const struct iwl_sap_nvm *sap_nvm) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm; + int i; + + kfree(mei->nvm); + mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL); + if (!mei->nvm) + return; + + ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr); + mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs; + mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg); + mei->nvm->caps = le32_to_cpu(sap_nvm->caps); + mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version); + + for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++) + mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]); + + wake_up_all(&mei->get_nvm_wq); +} + +static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev, + const struct iwl_sap_msg_dw *dw) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + + /* + * This means that we can't use the wifi device right now, CSME is not + * ready to let us use it. + */ + if (!dw->val) { + dev_info(&cldev->dev, "Ownership req denied\n"); + return; + } + + mei->got_ownership = true; + wake_up_all(&mei->get_ownership_wq); + + iwl_mei_send_sap_msg(cldev, + SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED); + + /* We can now start the connection, unblock rfkill */ + if (iwl_mei_cache.ops) + iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); +} + +static void iwl_mei_handle_ping(struct mei_cl_device *cldev, + const struct iwl_sap_hdr *hdr) +{ + iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG); +} + +static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev, + const struct iwl_sap_hdr *hdr) +{ + u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr); + u16 type = le16_to_cpu(hdr->type); + + dev_dbg(&cldev->dev, + "Got a new SAP message: type %d, len %d, seq %d\n", + le16_to_cpu(hdr->type), len, + le32_to_cpu(hdr->seq_num)); + +#define SAP_MSG_HANDLER(_cmd, _handler, _sz) \ + case SAP_MSG_NOTIF_ ## _cmd: \ + if (len < _sz) { \ + dev_err(&cldev->dev, \ + "Bad size for %d: %u < %u\n", \ + le16_to_cpu(hdr->type), \ + (unsigned int)len, \ + (unsigned int)_sz); \ + break; \ + } \ + mutex_lock(&iwl_mei_mutex); \ + _handler(cldev, (const void *)hdr); \ + mutex_unlock(&iwl_mei_mutex); \ + break + +#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \ + case SAP_MSG_NOTIF_ ## _cmd: \ + if (len < _sz) { \ + dev_err(&cldev->dev, \ + "Bad size for %d: %u < %u\n", \ + le16_to_cpu(hdr->type), \ + (unsigned int)len, \ + (unsigned int)_sz); \ + break; \ + } \ + _handler(cldev, (const void *)hdr); \ + break + +#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \ + case SAP_MSG_NOTIF_ ## _cmd: \ + if (len < _sz) { \ + dev_err(&cldev->dev, \ + "Bad size for %d: %u < %u\n", \ + le16_to_cpu(hdr->type), \ + (unsigned int)len, \ + (unsigned int)_sz); \ + break; \ + } \ + break + + switch (type) { + SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0); + SAP_MSG_HANDLER(CSME_FILTERS, + iwl_mei_handle_csme_filters, + sizeof(struct iwl_sap_csme_filters)); + SAP_MSG_HANDLER(CSME_CONN_STATUS, + iwl_mei_handle_conn_status, + sizeof(struct iwl_sap_notif_conn_status)); + SAP_MSG_HANDLER_NO_LOCK(AMT_STATE, + iwl_mei_handle_amt_state, + sizeof(struct iwl_sap_msg_dw)); + SAP_MSG_HANDLER_NO_HANDLER(PONG, 0); + 
SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm, + sizeof(struct iwl_sap_nvm)); + SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ, + iwl_mei_handle_rx_host_own_req, + sizeof(struct iwl_sap_msg_dw)); + SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner, + sizeof(struct iwl_sap_msg_dw)); + SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP, + iwl_mei_handle_can_release_ownership, 0); + SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP, + iwl_mei_handle_csme_taking_ownership, 0); + default: + /* + * This is not really an error; there are messages that we decided + * to ignore. Still, it is useful to be able to leave a note if debug + * is enabled. + */ + dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n", + le16_to_cpu(hdr->type), len); + } + +#undef SAP_MSG_HANDLER +#undef SAP_MSG_HANDLER_NO_LOCK +} + +static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz, + u32 *_rd, u32 wr, + void *_buf, u32 len) +{ + u8 *buf = _buf; + u32 rd = *_rd; + + if (rd + len <= q_sz) { + memcpy(buf, q_head + rd, len); + rd += len; + } else { + memcpy(buf, q_head + rd, q_sz - rd); + memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd)); + rd = len - (q_sz - rd); + } + + *_rd = rd; +} + +#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \ + IEEE80211_TKIP_IV_LEN + \ + sizeof(rfc1042_header) + ETH_TLEN) + +static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev, + const u8 *q_head, u32 q_sz, + u32 rd, u32 wr, ssize_t valid_rx_sz, + struct sk_buff_head *tx_skbs) +{ + struct iwl_sap_hdr hdr; + struct net_device *netdev = + rcu_dereference_protected(iwl_mei_cache.netdev, + lockdep_is_held(&iwl_mei_mutex)); + + if (!netdev) + return; + + while (valid_rx_sz >= sizeof(hdr)) { + struct ethhdr *ethhdr; + unsigned char *data; + struct sk_buff *skb; + u16 len; + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr)); + valid_rx_sz -= sizeof(hdr); + len = le16_to_cpu(hdr.len); + + if (valid_rx_sz < len) { + dev_err(&cldev->dev, + "Data queue is corrupted: valid data len %zd, len %d\n", + valid_rx_sz, len); + break; + } + + if (len < sizeof(*ethhdr)) { + dev_err(&cldev->dev, + "Data len is smaller than an ethernet header? len = %d\n", + len); + break; + } + + valid_rx_sz -= len; + + if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) { + dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n", + le16_to_cpu(hdr.type), len); + continue; + } + + /* We need enough room for the WiFi header + SNAP + IV */ + skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN); + if (!skb) + break; + + skb_reserve(skb, QOS_HDR_IV_SNAP_LEN); + ethhdr = skb_push(skb, sizeof(*ethhdr)); + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, + ethhdr, sizeof(*ethhdr)); + len -= sizeof(*ethhdr); + + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->protocol = ethhdr->h_proto; + + data = skb_put(skb, len); + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len); + + /* + * Enqueue the skb here so that it can be sent later when we + * do not hold the mutex. TX'ing a packet with a mutex held is + * possible, but it wouldn't be nice to forbid the TX path to + * call any of iwlmei's functions, since every API from iwlmei + * needs the mutex. + */ + __skb_queue_tail(tx_skbs, skb); + } +}
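The enqueue comment closing iwl_mei_handle_sap_data() describes a classic pattern: collect work on a local list while the lock is held, then process the list after dropping the lock. A standalone sketch of the shape (a pthread mutex and an int array stand in for iwl_mei_mutex and the skb list; note this toy drains in LIFO order while the driver's skb list is FIFO):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[8];		/* stands in for the Rx data queue */
static int n_pending;

static void xmit(int pkt)	/* stands in for dev_queue_xmit() */
{
	printf("tx %d\n", pkt);
}

static void drain(void)
{
	int local[8], n = 0, i;

	pthread_mutex_lock(&lock);
	while (n_pending)	/* dequeue under the lock... */
		local[n++] = pending[--n_pending];
	pthread_mutex_unlock(&lock);

	for (i = 0; i < n; i++)	/* ...transmit without it */
		xmit(local[i]);
}

int main(void)
{
	pending[n_pending++] = 1;
	pending[n_pending++] = 2;
	drain();
	return 0;
}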
+ +static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev, + const u8 *q_head, u32 q_sz, + u32 rd, u32 wr, ssize_t valid_rx_sz) +{ + struct page *p = alloc_page(GFP_KERNEL); + struct iwl_sap_hdr *hdr; + + if (!p) + return; + + hdr = page_address(p); + + while (valid_rx_sz >= sizeof(*hdr)) { + u16 len; + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr)); + valid_rx_sz -= sizeof(*hdr); + len = le16_to_cpu(hdr->len); + + if (valid_rx_sz < len) + break; + + iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len); + + trace_iwlmei_sap_cmd(hdr, false); + iwl_mei_handle_sap_msg(cldev, hdr); + valid_rx_sz -= len; + } + + /* valid_rx_sz must be 0 now... */ + if (valid_rx_sz) + dev_err(&cldev->dev, + "More data in the buffer although we read it all\n"); + + __free_page(p); +} + +static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev, + struct iwl_sap_q_ctrl_blk *notif_q, + const u8 *q_head, + struct sk_buff_head *skbs, + u32 q_sz) +{ + u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); + u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); + ssize_t valid_rx_sz; + + if (rd > q_sz || wr > q_sz) { + dev_err(&cldev->dev, + "Pointers are past the buffer limit\n"); + return; + } + + if (rd == wr) + return; + + valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr; + + if (skbs) + iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr, + valid_rx_sz, skbs); + else + iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr, + valid_rx_sz); + + /* Increment the read pointer to point to the write pointer */ + WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr)); +} + +static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_q_ctrl_blk *notif_q; + struct sk_buff_head tx_skbs; + struct iwl_sap_dir *dir; + void *q_head; + u32 q_sz; + + if (!mei->shared_mem.ctrl) + return; + + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF]; + q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF]; + + /* + * Do not hold the mutex here, but rather let each message handler + * take it as needed. This allows a handler to take it at the right + * time (e.g. only after rtnl_lock, to preserve the lock ordering). + */ + iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz); + + mutex_lock(&iwl_mei_mutex); + dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; + notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; + q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA]; + q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA]; + + __skb_queue_head_init(&tx_skbs); + + iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz); + + if (skb_queue_empty(&tx_skbs)) { + mutex_unlock(&iwl_mei_mutex); + return; + } + + /* + * Take the RCU read lock before we unlock the mutex to make sure that + * even if the netdev is replaced by another non-NULL netdev right after + * we unlock the mutex, the old netdev will still be valid when we + * transmit the frames. We can't allow the netdev to be replaced here + * because the skbs hold a pointer to it.
+ */ + rcu_read_lock(); + + mutex_unlock(&iwl_mei_mutex); + + if (!rcu_access_pointer(iwl_mei_cache.netdev)) { + dev_err(&cldev->dev, "Can't Tx without a netdev\n"); + skb_queue_purge(&tx_skbs); + goto out; + } + + while (!skb_queue_empty(&tx_skbs)) { + struct sk_buff *skb = __skb_dequeue(&tx_skbs); + + trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR); + dev_queue_xmit(skb); + } + +out: + rcu_read_unlock(); +} + +static void iwl_mei_rx(struct mei_cl_device *cldev) +{ + struct iwl_sap_me_msg_hdr *hdr; + u8 msg[100]; + ssize_t ret; + + ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg)); + if (ret < 0) { + dev_err(&cldev->dev, "failed to receive data: %zd\n", ret); + return; + } + + if (ret == 0) { + dev_err(&cldev->dev, "got an empty response\n"); + return; + } + + hdr = (void *)msg; + trace_iwlmei_me_msg(hdr, false); + + switch (le32_to_cpu(hdr->type)) { + case SAP_ME_MSG_START_OK: + BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) > + sizeof(msg)); + + iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret); + break; + case SAP_ME_MSG_CHECK_SHARED_AREA: + iwl_mei_handle_check_shared_area(cldev); + break; + default: + dev_err(&cldev->dev, "got a RX notification: %d\n", + le32_to_cpu(hdr->type)); + break; + } +} + +static int iwl_mei_send_start(struct mei_cl_device *cldev) +{ + struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); + struct iwl_sap_me_msg_start msg = { + .hdr.type = cpu_to_le32(SAP_ME_MSG_START), + .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), + .hdr.len = cpu_to_le32(sizeof(msg)), + .supported_versions[0] = SAP_VERSION, + .init_data_seq_num = cpu_to_le16(0x100), + .init_notif_seq_num = cpu_to_le16(0x800), + }; + int ret; + + trace_iwlmei_me_msg(&msg.hdr, true); + ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg)); + if (ret != sizeof(msg)) { + dev_err(&cldev->dev, + "failed to send the SAP_ME_MSG_START message %d\n", + ret); + return ret; + } + + return 0; +} + +static int iwl_mei_enable(struct mei_cl_device *cldev) +{ + int ret; + + ret = mei_cldev_enable(cldev); + if (ret < 0) { + dev_err(&cldev->dev, "failed to enable the device: %d\n", ret); + return ret; + } + + ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx); + if (ret) { + dev_err(&cldev->dev, + "failed to register to the rx cb: %d\n", ret); + mei_cldev_disable(cldev); + return ret; + } + + return 0; +} + +struct iwl_mei_nvm *iwl_mei_get_nvm(void) +{ + struct iwl_mei_nvm *nvm = NULL; + struct iwl_mei *mei; + int ret; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev, + SAP_MSG_NOTIF_GET_NVM); + if (ret) + goto out; + + mutex_unlock(&iwl_mei_mutex); + + ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ); + if (!ret) + return NULL; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (mei->nvm) + nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL); + +out: + mutex_unlock(&iwl_mei_mutex); + return nvm; +} +EXPORT_SYMBOL_GPL(iwl_mei_get_nvm); + +int iwl_mei_get_ownership(void) +{ + struct iwl_mei *mei; + int ret; + + mutex_lock(&iwl_mei_mutex); + + /* In case we didn't have a bind */ + if (!iwl_mei_is_connected()) { + ret = 0; + goto out; + } + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) { + ret = -ENODEV; + goto out; + } + + if (!mei->amt_enabled) { + ret = 0; + goto out; + } + + if 
(mei->got_ownership) { + ret = 0; + goto out; + } + + ret = iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP); + if (ret) + goto out; + + mutex_unlock(&iwl_mei_mutex); + + ret = wait_event_timeout(mei->get_ownership_wq, + mei->got_ownership, HZ / 2); + if (!ret) + return -ETIMEDOUT; + + mutex_lock(&iwl_mei_mutex); + + /* In case we didn't have a bind */ + if (!iwl_mei_is_connected()) { + ret = 0; + goto out; + } + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) { + ret = -ENODEV; + goto out; + } + + ret = !mei->got_ownership; + +out: + mutex_unlock(&iwl_mei_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(iwl_mei_get_ownership); + +void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, + const struct iwl_mei_colloc_info *colloc_info) +{ + struct iwl_sap_notif_host_link_up msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + .conn_info = { + .ssid_len = cpu_to_le32(conn_info->ssid_len), + .channel = conn_info->channel, + .band = conn_info->band, + .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher), + .auth_mode = cpu_to_le32(conn_info->auth_mode), + }, + }; + struct iwl_mei *mei; + + if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid)) + return; + + memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len); + memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN); + + if (colloc_info) { + msg.colloc_channel = colloc_info->channel; + msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1; + memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN); + } + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (!mei->amt_enabled) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + kfree(iwl_mei_cache.conn_info); + iwl_mei_cache.conn_info = + kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL); + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_host_associated); + +void iwl_mei_host_disassociated(void) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_host_link_down msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + .type = HOST_LINK_DOWN_TYPE_LONG, + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + kfree(iwl_mei_cache.conn_info); + iwl_mei_cache.conn_info = NULL; + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated); + +void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) +{ + struct iwl_mei *mei; + u32 rfkill_state = 0; + struct iwl_sap_msg_dw msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + }; + + if (!sw_rfkill) + rfkill_state |= SAP_SW_RFKILL_DEASSERTED; + + if (!hw_rfkill) + rfkill_state |= SAP_HW_RFKILL_DEASSERTED; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + msg.val = cpu_to_le32(rfkill_state); + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + iwl_mei_cache.rf_kill = rfkill_state; + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state); + +void iwl_mei_set_nic_info(const u8 *mac_address, 
const u8 *nvm_address) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_host_nic_info msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + ether_addr_copy(msg.mac_address, mac_address); + ether_addr_copy(msg.nvm_address, nvm_address); + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + ether_addr_copy(iwl_mei_cache.mac_address, mac_address); + ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address); + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info); + +void iwl_mei_set_country_code(u16 mcc) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_country_code msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + .mcc = cpu_to_le16(mcc), + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + iwl_mei_cache.mcc = mcc; + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_country_code); + +void iwl_mei_set_power_limit(const __le16 *power_limit) +{ + struct iwl_mei *mei; + struct iwl_sap_notif_sar_limits msg = { + .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), + .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), + }; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table)); + + iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); + +out: + kfree(iwl_mei_cache.power_limit); + iwl_mei_cache.power_limit = kmemdup(power_limit, + sizeof(msg.sar_chain_info_table), GFP_KERNEL); + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit); + +void iwl_mei_set_netdev(struct net_device *netdev) +{ + struct iwl_mei *mei; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) { + rcu_assign_pointer(iwl_mei_cache.netdev, netdev); + goto out; + } + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (!netdev) { + struct net_device *dev = + rcu_dereference_protected(iwl_mei_cache.netdev, + lockdep_is_held(&iwl_mei_mutex)); + + if (!dev) + goto out; + + netdev_rx_handler_unregister(dev); + } + + rcu_assign_pointer(iwl_mei_cache.netdev, netdev); + + if (netdev && mei->amt_enabled) + netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei); + +out: + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_set_netdev); + +void iwl_mei_device_down(void) +{ + struct iwl_mei *mei; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_is_connected()) + goto out; + + mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); + + if (!mei) + goto out; + + if (!mei->csme_taking_ownership) + goto out; + + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED); + mei->csme_taking_ownership = false; +out: + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_device_down); + +int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops) +{ + int ret; + + /* + * We must have a non-NULL priv pointer to not crash when there are + * multiple WiFi devices. 
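[Editor's note - illustrative sketch, not part of this commit. Every iwl_mei_set_*() export above follows the same cache-and-forward shape: take iwl_mei_mutex, send the SAP notification only if a SAP connection is currently up, and always update iwl_mei_cache so the state can be replayed to CSME when it connects later. A minimal sketch of that pattern, where the "foo" field, its cache slot and SAP_MSG_NOTIF_FOO are hypothetical stand-ins:

	static void iwl_mei_set_foo(u32 foo)
	{
		struct iwl_mei *mei;
		struct iwl_sap_msg_dw msg = {
			.hdr.type = cpu_to_le16(SAP_MSG_NOTIF_FOO), /* hypothetical */
			.hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)),
			.val = cpu_to_le32(foo),
		};

		mutex_lock(&iwl_mei_mutex);

		if (iwl_mei_is_connected()) {
			mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);
			if (mei)
				iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr);
		}

		iwl_mei_cache.foo = foo; /* hypothetical cache field */
		mutex_unlock(&iwl_mei_mutex);
	}
]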
+ */ + if (!priv) + return -EINVAL; + + mutex_lock(&iwl_mei_mutex); + + /* do not allow registration if someone else already registered */ + if (iwl_mei_cache.priv || iwl_mei_cache.ops) { + ret = -EBUSY; + goto out; + } + + iwl_mei_cache.priv = priv; + iwl_mei_cache.ops = ops; + + if (iwl_mei_global_cldev) { + struct iwl_mei *mei = + mei_cldev_get_drvdata(iwl_mei_global_cldev); + + /* we have already a SAP connection */ + if (iwl_mei_is_connected()) + iwl_mei_send_sap_msg(mei->cldev, + SAP_MSG_NOTIF_WIFIDR_UP); + } + ret = 0; + +out: + mutex_unlock(&iwl_mei_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(iwl_mei_register); + +void iwl_mei_start_unregister(void) +{ + mutex_lock(&iwl_mei_mutex); + + /* At this point, the wifi driver should have removed the netdev */ + if (rcu_access_pointer(iwl_mei_cache.netdev)) + pr_err("Still had a netdev pointer set upon unregister\n"); + + kfree(iwl_mei_cache.conn_info); + iwl_mei_cache.conn_info = NULL; + kfree(iwl_mei_cache.power_limit); + iwl_mei_cache.power_limit = NULL; + iwl_mei_cache.ops = NULL; + /* leave iwl_mei_cache.priv non-NULL to prevent any new registration */ + + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_start_unregister); + +void iwl_mei_unregister_complete(void) +{ + mutex_lock(&iwl_mei_mutex); + + iwl_mei_cache.priv = NULL; + + if (iwl_mei_global_cldev) { + struct iwl_mei *mei = + mei_cldev_get_drvdata(iwl_mei_global_cldev); + + iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN); + mei->got_ownership = false; + } + + mutex_unlock(&iwl_mei_mutex); +} +EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + +static ssize_t +iwl_mei_dbgfs_send_start_message_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int ret; + + mutex_lock(&iwl_mei_mutex); + + if (!iwl_mei_global_cldev) { + ret = -ENODEV; + goto out; + } + + ret = iwl_mei_send_start(iwl_mei_global_cldev); + +out: + mutex_unlock(&iwl_mei_mutex); + return ret ?: count; +} + +static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = { + .write = iwl_mei_dbgfs_send_start_message_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + iwl_mei_get_ownership(); + + return count; +} + +static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = { + .write = iwl_mei_dbgfs_req_ownership_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static void iwl_mei_dbgfs_register(struct iwl_mei *mei) +{ + mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); + + if (!mei->dbgfs_dir) + return; + + debugfs_create_ulong("status", S_IRUSR, + mei->dbgfs_dir, &iwl_mei_status); + debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir, + mei, &iwl_mei_dbgfs_send_start_message_ops); + debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir, + mei, &iwl_mei_dbgfs_req_ownership_ops); +} + +static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) +{ + debugfs_remove_recursive(mei->dbgfs_dir); + mei->dbgfs_dir = NULL; +} + +#else + +static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {} +static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {} + +#endif /* CONFIG_DEBUG_FS */ + +/* + * iwl_mei_probe - the probe function called by the mei bus enumeration + * + * This allocates the data needed by iwlmei and sets a pointer to this data + * into the mei_cl_device's drvdata. 
+ * It starts the SAP protocol by sending the SAP_ME_MSG_START without
+ * waiting for the answer. The answer will be caught later by the Rx callback.
+ */
+static int iwl_mei_probe(struct mei_cl_device *cldev,
+			 const struct mei_cl_device_id *id)
+{
+	struct iwl_mei *mei;
+	int ret;
+
+	mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
+	if (!mei)
+		return -ENOMEM;
+
+	init_waitqueue_head(&mei->get_nvm_wq);
+	INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
+	INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
+			  iwl_mei_csa_throttle_end_wk);
+	init_waitqueue_head(&mei->get_ownership_wq);
+	spin_lock_init(&mei->data_q_lock);
+
+	mei_cldev_set_drvdata(cldev, mei);
+	mei->cldev = cldev;
+
+	/*
+	 * The CSME firmware needs to boot the internal WLAN client. Wait here
+	 * so that the DMA map request will succeed.
+	 */
+	msleep(20);
+
+	ret = iwl_mei_alloc_shared_mem(cldev);
+	if (ret)
+		goto free;
+
+	iwl_mei_init_shared_mem(mei);
+
+	ret = iwl_mei_enable(cldev);
+	if (ret)
+		goto free_shared_mem;
+
+	iwl_mei_dbgfs_register(mei);
+
+	/*
+	 * We now have an Rx function in place; start the SAP protocol.
+	 * We expect to get the SAP_ME_MSG_START_OK response later on.
+	 */
+	mutex_lock(&iwl_mei_mutex);
+	ret = iwl_mei_send_start(cldev);
+	mutex_unlock(&iwl_mei_mutex);
+	if (ret)
+		goto debugfs_unregister;
+
+	/* must be last */
+	iwl_mei_global_cldev = cldev;
+
+	return 0;
+
+debugfs_unregister:
+	iwl_mei_dbgfs_unregister(mei);
+	mei_cldev_disable(cldev);
+free_shared_mem:
+	iwl_mei_free_shared_mem(cldev);
+free:
+	mei_cldev_set_drvdata(cldev, NULL);
+	devm_kfree(&cldev->dev, mei);
+
+	return ret;
+}
+
+#define SEND_SAP_MAX_WAIT_ITERATION 10
+
+static void iwl_mei_remove(struct mei_cl_device *cldev)
+{
+	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
+	int i;
+
+	/*
+	 * We are being removed while the bus is active; it means we are
+	 * going to suspend/shutdown, so the NIC will disappear.
+	 */
+	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
+		iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);
+
+	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
+		struct net_device *dev;
+
+		/*
+		 * First take rtnl and only then the mutex to avoid an ABBA
+		 * deadlock with iwl_mei_set_netdev()
+		 */
+		rtnl_lock();
+		mutex_lock(&iwl_mei_mutex);
+
+		/*
+		 * If we are suspending and the wifi driver hasn't removed its
+		 * netdev yet, do it now. In any case, don't change the
+		 * cache.netdev pointer.
+		 */
+		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
+						lockdep_is_held(&iwl_mei_mutex));
+
+		netdev_rx_handler_unregister(dev);
+		mutex_unlock(&iwl_mei_mutex);
+		rtnl_unlock();
+	}
+
+	mutex_lock(&iwl_mei_mutex);
+
+	/*
+	 * Tell CSME that we are going down so that it won't access the
+	 * memory anymore; make sure this message goes through immediately.
+	 */
+	mei->csa_throttled = false;
+	iwl_mei_send_sap_msg(mei->cldev,
+			     SAP_MSG_NOTIF_HOST_GOES_DOWN);
+
+	for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
+		if (!iwl_mei_host_to_me_data_pending(mei))
+			break;
+
+		msleep(5);
+	}
+
+	/*
+	 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
+	 * it means that it will probably keep reading memory that we are going
+	 * to unmap and free; expect IOMMU error messages.
+	 */
+	if (i == SEND_SAP_MAX_WAIT_ITERATION)
+		dev_err(&mei->cldev->dev,
+			"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");
+
+	mutex_unlock(&iwl_mei_mutex);
+
+	/*
+	 * This looks strange, but this lock is taken here to make sure that
+	 * iwl_mei_add_data_to_ring(), called from the Tx path, sees that we
+	 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
+	 * Rx isn't a problem because the rx_handler can't be called after
+	 * having been unregistered.
+	 */
+	spin_lock_bh(&mei->data_q_lock);
+	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
+	spin_unlock_bh(&mei->data_q_lock);
+
+	if (iwl_mei_cache.ops)
+		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+
+	/*
+	 * mei_cldev_disable will return only after all the MEI Rx is done.
+	 * It must be called when iwl_mei_mutex is *not* held, since it waits
+	 * for our Rx handler to complete.
+	 * After it returns, no new Rx will start.
+	 */
+	mei_cldev_disable(cldev);
+
+	/*
+	 * The netdev was already removed, and its removal includes a call to
+	 * synchronize_net(), so we know there won't be any new Rx that could
+	 * trigger the following workers.
+	 */
+	cancel_work_sync(&mei->send_csa_msg_wk);
+	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);
+
+	/*
+	 * If someone is waiting for the ownership, let them know that we are
+	 * going down and that we are not connected anymore. They'll be able
+	 * to take the device.
+	 */
+	wake_up_all(&mei->get_ownership_wq);
+
+	mutex_lock(&iwl_mei_mutex);
+
+	iwl_mei_global_cldev = NULL;
+
+	wake_up_all(&mei->get_nvm_wq);
+
+	iwl_mei_free_shared_mem(cldev);
+
+	iwl_mei_dbgfs_unregister(mei);
+
+	mei_cldev_set_drvdata(cldev, NULL);
+
+	kfree(mei->nvm);
+
+	kfree(rcu_access_pointer(mei->filters));
+
+	devm_kfree(&cldev->dev, mei);
+
+	mutex_unlock(&iwl_mei_mutex);
+}
+
+static const struct mei_cl_device_id iwl_mei_tbl[] = {
+	{ KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY},
+
+	/* required last entry */
+	{ }
+};
+
+/*
+ * Do not export the device table because this module is loaded as
+ * a dependency of iwlwifi.
+ */
+
+static struct mei_cl_driver iwl_mei_cl_driver = {
+	.id_table = iwl_mei_tbl,
+	.name = KBUILD_MODNAME,
+	.probe = iwl_mei_probe,
+	.remove = iwl_mei_remove,
+};
+
+module_mei_cl_driver(iwl_mei_cl_driver);
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
new file mode 100644
index 000000000000..5f966af69720
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_arp.h>
+#include <uapi/linux/icmp.h>
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+
+#include <net/cfg80211.h>
+#include <net/ip.h>
+
+#include <linux/if_arp.h>
+#include <linux/icmp.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <linux/mm.h>
+
+#include "internal.h"
+#include "sap.h"
+#include "iwl-mei.h"
+
+/*
+ * Returns true if further filtering should be stopped. Only in that case
+ * pass_to_csme and rx_handler_res are set. Otherwise, the next level of
+ * filters should be checked.
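[Editor's note - illustrative sketch, not part of this commit. The helpers below all share the tri-state contract just described: the bool return means "filtering is decided, stop here", and only then are *pass_to_csme and *rx_handler_res meaningful. Condensed, the dispatcher further down (iwl_mei_rx_pass_to_csme()) composes them like this:

	rx_handler_result_t res = RX_HANDLER_PASS;

	/* L2 level first: an authoritative answer short-circuits the rest */
	if (iwl_mei_rx_filter_eth(ethhdr, filters, pass_to_csme, &res))
		return res;

	/* otherwise fall through to the per-ethertype level (IPv4/ARP/IPv6) */
	*pass_to_csme = filt_handler(skb, filters, &res);
	return res;
]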
+ */
+static bool iwl_mei_rx_filter_eth(const struct ethhdr *ethhdr,
+				  const struct iwl_sap_oob_filters *filters,
+				  bool *pass_to_csme,
+				  rx_handler_result_t *rx_handler_res)
+{
+	const struct iwl_sap_eth_filter *filt;
+
+	/* This filter is not relevant for UCAST packets */
+	if (!is_multicast_ether_addr(ethhdr->h_dest) ||
+	    is_broadcast_ether_addr(ethhdr->h_dest))
+		return false;
+
+	for (filt = &filters->eth_filters[0];
+	     filt < &filters->eth_filters[0] + ARRAY_SIZE(filters->eth_filters);
+	     filt++) {
+		/* Assume there are no enabled filters after a disabled one */
+		if (!(filt->flags & SAP_ETH_FILTER_ENABLED))
+			break;
+
+		if (compare_ether_header(filt->mac_address, ethhdr->h_dest))
+			continue;
+
+		/* Packet needs to reach the host's stack */
+		if (filt->flags & SAP_ETH_FILTER_COPY)
+			*rx_handler_res = RX_HANDLER_PASS;
+		else
+			*rx_handler_res = RX_HANDLER_CONSUMED;
+
+		/* We have an authoritative answer, stop filtering */
+		if (filt->flags & SAP_ETH_FILTER_STOP) {
+			*pass_to_csme = true;
+			return true;
+		}
+
+		return false;
+	}
+
+	/* MCAST frames that don't match layer 2 filters are not sent to ME */
+	*pass_to_csme = false;
+
+	return true;
+}
+
+/*
+ * Returns true iff the frame should be passed to CSME, in which case
+ * rx_handler_res is set.
+ */
+static bool iwl_mei_rx_filter_arp(struct sk_buff *skb,
+				  const struct iwl_sap_oob_filters *filters,
+				  rx_handler_result_t *rx_handler_res)
+{
+	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
+	const struct arphdr *arp;
+	const __be32 *target_ip;
+	u32 flags = le32_to_cpu(filt->flags);
+
+	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
+		return false;
+
+	arp = arp_hdr(skb);
+
+	/* Handle only IPv4 over ethernet ARP frames */
+	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
+	    arp->ar_pro != htons(ETH_P_IP))
+		return false;
+
+	/*
+	 * After the ARP header, we have:
+	 * src MAC address - 6 bytes
+	 * src IP address - 4 bytes
+	 * target MAC address - 6 bytes
+	 */
+	target_ip = (void *)((u8 *)(arp + 1) +
+			     ETH_ALEN + sizeof(__be32) + ETH_ALEN);
+
+	/*
+	 * An ARP request is forwarded to ME only if the IP address matches
+	 * the ARP request's target IP field.
+	 */
+	if (arp->ar_op == htons(ARPOP_REQUEST) &&
+	    (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ARP_REQ_PASS)) &&
+	    (filt->ipv4_addr == 0 || filt->ipv4_addr == *target_ip)) {
+		if (flags & SAP_IPV4_FILTER_ARP_REQ_COPY)
+			*rx_handler_res = RX_HANDLER_PASS;
+		else
+			*rx_handler_res = RX_HANDLER_CONSUMED;
+
+		return true;
+	}
+
+	/* An ARP reply is always forwarded to ME regardless of the IP */
+	if (flags & SAP_IPV4_FILTER_ARP_RESP_PASS &&
+	    arp->ar_op == htons(ARPOP_REPLY)) {
+		if (flags & SAP_IPV4_FILTER_ARP_RESP_COPY)
+			*rx_handler_res = RX_HANDLER_PASS;
+		else
+			*rx_handler_res = RX_HANDLER_CONSUMED;
+
+		return true;
+	}
+
+	return false;
+}
+
+static bool
+iwl_mei_rx_filter_tcp_udp(struct sk_buff *skb, bool ip_match,
+			  const struct iwl_sap_oob_filters *filters,
+			  rx_handler_result_t *rx_handler_res)
+{
+	const struct iwl_sap_flex_filter *filt;
+
+	for (filt = &filters->flex_filters[0];
+	     filt < &filters->flex_filters[0] + ARRAY_SIZE(filters->flex_filters);
+	     filt++) {
+		if (!(filt->flags & SAP_FLEX_FILTER_ENABLED))
+			break;
+
+		/*
+		 * We are required to have a match on the IP level and we
+		 * didn't have such a match.
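[Editor's note - illustration, not part of this commit. The pointer arithmetic in iwl_mei_rx_filter_arp() above skips the fixed layout of an IPv4-over-ethernet ARP body; the same offsets written as a struct (the struct name is hypothetical, used here only to make the offsets visible):

	struct arp_eth_ipv4_body {
		u8	ar_sha[ETH_ALEN];	/* sender MAC,  6 bytes */
		__be32	ar_sip;			/* sender IPv4, 4 bytes */
		u8	ar_tha[ETH_ALEN];	/* target MAC,  6 bytes */
		__be32	ar_tip;			/* target IPv4, 4 bytes */
	} __packed;

so target_ip ends up pointing at ar_tip, i.e. at (u8 *)(arp + 1) + 6 + 4 + 6.]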
+		 */
+		if ((filt->flags &
+		     (SAP_FLEX_FILTER_IPV4 | SAP_FLEX_FILTER_IPV6)) &&
+		    !ip_match)
+			continue;
+
+		if ((filt->flags & SAP_FLEX_FILTER_UDP) &&
+		    ip_hdr(skb)->protocol != IPPROTO_UDP)
+			continue;
+
+		if ((filt->flags & SAP_FLEX_FILTER_TCP) &&
+		    ip_hdr(skb)->protocol != IPPROTO_TCP)
+			continue;
+
+		/*
+		 * We must have either a TCP header or a UDP header; both
+		 * start with a source port and then a destination port.
+		 * Both are big endian words.
+		 * Use a UDP header and that will work for TCP as well.
+		 */
+		if ((filt->src_port && filt->src_port != udp_hdr(skb)->source) ||
+		    (filt->dst_port && filt->dst_port != udp_hdr(skb)->dest))
+			continue;
+
+		if (filt->flags & SAP_FLEX_FILTER_COPY)
+			*rx_handler_res = RX_HANDLER_PASS;
+		else
+			*rx_handler_res = RX_HANDLER_CONSUMED;
+
+		return true;
+	}
+
+	return false;
+}
+
+static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
+				   const struct iwl_sap_oob_filters *filters,
+				   rx_handler_result_t *rx_handler_res)
+{
+	const struct iwl_sap_ipv4_filter *filt = &filters->ipv4_filter;
+	const struct iphdr *iphdr;
+	unsigned int iphdrlen;
+	bool match;
+
+	if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
+	    !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
+		return false;
+
+	iphdrlen = ip_hdrlen(skb);
+	iphdr = ip_hdr(skb);
+	match = !filters->ipv4_filter.ipv4_addr ||
+		filters->ipv4_filter.ipv4_addr == iphdr->daddr;
+
+	skb_set_transport_header(skb, skb_network_offset(skb) + iphdrlen);
+
+	switch (ip_hdr(skb)->protocol) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+		/*
+		 * A UDP header is shorter than a TCP header, and we only look
+		 * at the first bytes of the header anyway (see below).
+		 * If we have a truncated TCP packet, let CSME handle this.
+		 */
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof(struct udphdr)))
+			return false;
+
+		return iwl_mei_rx_filter_tcp_udp(skb, match,
+						 filters, rx_handler_res);
+
+	case IPPROTO_ICMP: {
+		struct icmphdr *icmp;
+
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(*icmp)))
+			return false;
+
+		icmp = icmp_hdr(skb);
+
+		/*
+		 * Don't pass echo requests to ME even if it wants them, as we
+		 * want the host to answer.
+		 */
+		if ((filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_PASS)) &&
+		    match && (icmp->type != ICMP_ECHO || icmp->code != 0)) {
+			if (filt->flags & cpu_to_le32(SAP_IPV4_FILTER_ICMP_COPY))
+				*rx_handler_res = RX_HANDLER_PASS;
+			else
+				*rx_handler_res = RX_HANDLER_CONSUMED;
+
+			return true;
+		}
+		break;
+	}
+	case IPPROTO_ICMPV6:
+		/* TODO: Should we have the same ICMP request logic here too?
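[Editor's note - illustration, not part of this commit. The "use udp_hdr() even for TCP" shortcut in iwl_mei_rx_filter_tcp_udp() above is safe because both transport headers begin with the same two big-endian port fields:

	/* struct udphdr {		struct tcphdr {
	 *	__be16 source;			__be16 source;
	 *	__be16 dest;			__be16 dest;
	 *	...				...
	 */

Reading ->source and ->dest through struct udphdr is therefore layout-correct for either protocol, provided at least sizeof(struct udphdr) bytes were pulled - which the IPPROTO_UDP/IPPROTO_TCP case above guarantees with pskb_may_pull().]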
		 */
+		if ((filters->icmpv6_flags &
+		     cpu_to_le32(SAP_ICMPV6_FILTER_ENABLED)) && match) {
+			if (filters->icmpv6_flags &
+			    cpu_to_le32(SAP_ICMPV6_FILTER_COPY))
+				*rx_handler_res = RX_HANDLER_PASS;
+			else
+				*rx_handler_res = RX_HANDLER_CONSUMED;
+
+			return true;
+		}
+		break;
+	default:
+		return false;
+	}
+
+	return false;
+}
+
+static bool iwl_mei_rx_filter_ipv6(struct sk_buff *skb,
+				   const struct iwl_sap_oob_filters *filters,
+				   rx_handler_result_t *rx_handler_res)
+{
+	*rx_handler_res = RX_HANDLER_PASS;
+
+	/* TODO */
+
+	return false;
+}
+
+static rx_handler_result_t
+iwl_mei_rx_pass_to_csme(struct sk_buff *skb,
+			const struct iwl_sap_oob_filters *filters,
+			bool *pass_to_csme)
+{
+	const struct ethhdr *ethhdr = (void *)skb_mac_header(skb);
+	rx_handler_result_t rx_handler_res = RX_HANDLER_PASS;
+	bool (*filt_handler)(struct sk_buff *skb,
+			     const struct iwl_sap_oob_filters *filters,
+			     rx_handler_result_t *rx_handler_res);
+
+	/*
+	 * skb->data points at the IP header / ARP header, and the ETH header
+	 * is in the headroom.
+	 */
+	skb_reset_network_header(skb);
+
+	/*
+	 * MCAST IP packets sent by us are received again here without
+	 * an ETH header. Drop them here.
+	 */
+	if (!skb_mac_offset(skb))
+		return RX_HANDLER_PASS;
+
+	if (skb_headroom(skb) < sizeof(*ethhdr))
+		return RX_HANDLER_PASS;
+
+	if (iwl_mei_rx_filter_eth(ethhdr, filters,
+				  pass_to_csme, &rx_handler_res))
+		return rx_handler_res;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		filt_handler = iwl_mei_rx_filter_ipv4;
+		break;
+	case htons(ETH_P_ARP):
+		filt_handler = iwl_mei_rx_filter_arp;
+		break;
+	case htons(ETH_P_IPV6):
+		filt_handler = iwl_mei_rx_filter_ipv6;
+		break;
+	default:
+		*pass_to_csme = false;
+		return rx_handler_res;
+	}
+
+	*pass_to_csme = filt_handler(skb, filters, &rx_handler_res);
+
+	return rx_handler_res;
+}
+
+rx_handler_result_t iwl_mei_rx_filter(struct sk_buff *orig_skb,
+				      const struct iwl_sap_oob_filters *filters,
+				      bool *pass_to_csme)
+{
+	rx_handler_result_t ret;
+	struct sk_buff *skb;
+
+	ret = iwl_mei_rx_pass_to_csme(orig_skb, filters, pass_to_csme);
+
+	if (!*pass_to_csme)
+		return RX_HANDLER_PASS;
+
+	if (ret == RX_HANDLER_PASS) {
+		skb = skb_copy(orig_skb, GFP_ATOMIC);
+		/* if the copy fails, only CSME misses the packet */
+		if (!skb)
+			return ret;
+	} else {
+		skb = orig_skb;
+	}
+
+	/* CSME wants the MAC header as well, push it back */
+	skb_push(skb, skb->data - skb_mac_header(skb));
+
+	/*
+	 * Add the packet that CSME wants to get to the ring. Don't send the
+	 * Check Shared Area HECI message since this is not possible from the
+	 * Rx context. The caller will schedule a worker to do just that.
+	 */
+	iwl_mei_add_data_to_ring(skb, false);
+
+	/*
+	 * In case we drop the packet, don't free it; the caller will do that
+	 * for us.
+	 */
+	if (ret == RX_HANDLER_PASS)
+		dev_kfree_skb(skb);
+
+	return ret;
+}
+
+#define DHCP_SERVER_PORT 67
+#define DHCP_CLIENT_PORT 68
+void iwl_mei_tx_copy_to_csme(struct sk_buff *origskb, unsigned int ivlen)
+{
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb;
+	struct ethhdr ethhdr;
+	struct ethhdr *eth;
+
+	/* Catch DHCP packets */
+	if (origskb->protocol != htons(ETH_P_IP) ||
+	    ip_hdr(origskb)->protocol != IPPROTO_UDP ||
+	    udp_hdr(origskb)->source != htons(DHCP_CLIENT_PORT) ||
+	    udp_hdr(origskb)->dest != htons(DHCP_SERVER_PORT))
+		return;
+
+	/*
+	 * We could be a bit less aggressive here and not copy everything, but
+	 * this is very rare anyway, so don't bother much.
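[Editor's note - illustration, not part of this commit. The header surgery performed below rebuilds an ethernet frame from the copied 802.11 frame; assuming the usual RFC 1042 SNAP encapsulation, the transformation is:

	before: [ 802.11 hdr ][ IV ][ AA AA 03 00 00 00 ][ ethertype ][ IP ... ]
	pull:   802.11 hdr + ivlen + the first 6 SNAP bytes (the ethertype stays)
	push:   12 bytes for h_dest/h_source, taken from the 802.11 addresses
	after:  [ dest MAC ][ src MAC ][ ethertype ][ IP ... ]

which is why the magic constant below is 6 and the push length is sizeof(h_dest) + sizeof(h_source).]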
+	 */
+	skb = skb_copy(origskb, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = origskb->protocol;
+
+	hdr = (void *)skb->data;
+
+	memcpy(ethhdr.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
+	memcpy(ethhdr.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
+
+	/*
+	 * Remove the ieee80211 header + IV + SNAP but leave the ethertype.
+	 * We still have enough headroom for the SAP header.
+	 */
+	pskb_pull(skb, ieee80211_hdrlen(hdr->frame_control) + ivlen + 6);
+	eth = skb_push(skb, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));
+	memcpy(eth, &ethhdr, sizeof(ethhdr.h_dest) + sizeof(ethhdr.h_source));
+
+	iwl_mei_add_data_to_ring(skb, true);
+
+	dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme);
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
new file mode 100644
index 000000000000..11e3009121cc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#ifndef __sap_h__
+#define __sap_h__
+
+#include "mei/iwl-mei.h"
+
+/**
+ * DOC: Introduction
+ *
+ * SAP is the protocol used by the Intel Wireless driver (iwlwifi)
+ * and the wireless driver implemented in the CSME firmware.
+ * It allows them to do several things:
+ * 1) Decide who is the owner of the device: CSME or the host
+ * 2) When the host is the owner of the device, CSME can still
+ * send and receive packets through iwlwifi.
+ *
+ * The protocol uses the ME interface (mei driver) to send
+ * messages to the CSME firmware. Those messages have a header
+ * &struct iwl_sap_me_msg_hdr and this header is followed
+ * by a payload.
+ *
+ * Since this messaging system cannot support high amounts of
+ * traffic, iwlwifi and the CSME firmware's WLAN driver have an
+ * additional communication pipe to exchange information. The body
+ * of the message is copied to a shared area and the message that
+ * goes over the ME interface just signals the other side
+ * that a new message is waiting in the shared area. The ME
+ * interface is used only for signaling and not to transfer
+ * the payload.
+ *
+ * This shared area of memory is DMA mapped so as to be
+ * writable by both the CSME firmware and iwlwifi. It is
+ * mapped to the address space of the device that controls the ME
+ * interface's DMA engine. Any data that iwlwifi needs to
+ * send to the CSME firmware needs to be copied there.
+ */
+
+/**
+ * DOC: Initial Handshake
+ *
+ * Once we get a link to the CSME's WLAN driver we start the handshake
+ * to establish the shared memory that will allow the communication between
+ * the CSME's WLAN driver and the host.
+ *
+ * 1) Host sends %SAP_ME_MSG_START message with the physical address
+ * of the shared area.
+ * 2) CSME replies with %SAP_ME_MSG_START_OK which includes the
+ * protocol versions supported by CSME.
+ */
+
+/**
+ * DOC: Host and driver state messages
+ *
+ * In order to let CSME know about the host state and the host driver state,
+ * the host sends the following notifications.
+ * When the host driver is loaded, the host sends %SAP_MSG_NOTIF_WIFIDR_UP.
+ * When the host driver is unloaded, the host sends %SAP_MSG_NOTIF_WIFIDR_DOWN.
+ * When iwlmei is unloaded, %SAP_MSG_NOTIF_HOST_GOES_DOWN is sent to let
+ * CSME know not to access the shared memory anymore since it'll be freed.
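[Editor's note - illustrative sketch, not part of this commit. Per the "signaling only" scheme in DOC: Introduction above, sending a SAP message is a two-step operation: copy the payload into the shared area, then ring a doorbell over the MEI channel. A rough sketch, where the queue helper is hypothetical but the doorbell message shape follows the structures in this header:

	/* step 1: the payload goes through the DMA-mapped shared area */
	demo_shared_queue_push(q, hdr, le16_to_cpu(hdr->len) + sizeof(*hdr));

	/* step 2: the ME message carries no payload, it only says
	 * "go look at the shared area"
	 */
	struct iwl_sap_me_msg_hdr doorbell = {
		.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA),
		.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
		.len = cpu_to_le32(sizeof(doorbell)),
	};
	mei_cldev_send(mei->cldev, (void *)&doorbell, sizeof(doorbell));
]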
+ *
+ * CSME will reply to SAP_MSG_NOTIF_WIFIDR_UP with
+ * %SAP_MSG_NOTIF_AMT_STATE to let the host driver know whether CSME can use
+ * the WiFi device or not, followed by %SAP_MSG_NOTIF_CSME_CONN_STATUS to
+ * inform the host driver about the connection state of CSME.
+ *
+ * When the host is associated to an AP, it must send
+ * %SAP_MSG_NOTIF_HOST_LINK_UP, and when it disconnects from the AP, it must
+ * send %SAP_MSG_NOTIF_HOST_LINK_DOWN.
+ */
+
+/**
+ * DOC: Ownership
+ *
+ * The device can be controlled either by the CSME firmware or
+ * by the host driver: iwlwifi. There is a negotiation between
+ * those two entities to determine who controls (or owns) the
+ * device. Since the CSME can control the device even when the
+ * OS is not working or even missing, the CSME can request the
+ * device if it comes to the conclusion that the OS's host driver
+ * is not operational. This is why the host driver needs to
+ * signal CSME that it is up and running. If the driver is
+ * unloaded, it'll signal CSME that it is going down so that
+ * CSME can take ownership.
+ */
+
+/**
+ * DOC: Ownership transfer
+ *
+ * When the host driver needs the device, it'll send the
+ * %SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP that will be replied to by
+ * %SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ which will let the
+ * host know whether the ownership is granted or not. If the ownership is
+ * granted, the host sends %SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED.
+ *
+ * When CSME requests ownership, it'll send the
+ * %SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP and give the host some time to stop
+ * accessing the device. The host needs to send
+ * %SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED to confirm that it won't access
+ * the device anymore. If the host fails to send this message fast enough,
+ * CSME will take ownership of the device anyway.
+ * When CSME is willing to release the ownership, it'll send
+ * %SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP.
+ */
+
+/**
+ * DOC: Data messages
+ *
+ * Data messages must be sent and received on a separate queue in the shared
+ * memory. Almost all the data messages use %SAP_MSG_DATA_PACKET, both for
+ * packets sent by CSME to the host to be sent to the AP and for packets
+ * received from the AP and sent by the host to CSME.
+ * CSME sends filters to the host to let the host know what inbound packets
+ * it must send to CSME. Those filters are received by the host as a
+ * %SAP_MSG_NOTIF_CSME_FILTERS command.
+ * The only outbound packets that must be sent to CSME are the DHCP packets.
+ * Those packets must use the %SAP_MSG_CB_DATA_PACKET message.
+ */
+
+/**
+ * enum iwl_sap_me_msg_id - the ID of the ME message
+ * @SAP_ME_MSG_START: See &struct iwl_sap_me_msg_start.
+ * @SAP_ME_MSG_START_OK: See &struct iwl_sap_me_msg_start_ok.
+ * @SAP_ME_MSG_CHECK_SHARED_AREA: This message has no payload.
+ */
+enum iwl_sap_me_msg_id {
+	SAP_ME_MSG_START = 1,
+	SAP_ME_MSG_START_OK,
+	SAP_ME_MSG_CHECK_SHARED_AREA,
+};
+
+/**
+ * struct iwl_sap_me_msg_hdr - the header of the ME message
+ * @type: the type of the message, see &enum iwl_sap_me_msg_id.
+ * @seq_num: a sequence number used for debug only.
+ * @len: the length of the message.
+ */
+struct iwl_sap_me_msg_hdr {
+	__le32 type;
+	__le32 seq_num;
+	__le32 len;
+} __packed;
+
+/**
+ * struct iwl_sap_me_msg_start - used for the %SAP_ME_MSG_START message
+ * @hdr: See &struct iwl_sap_me_msg_hdr.
+ * @shared_mem: physical address of SAP shared memory area.
+ * @init_data_seq_num: seq_num of the first data packet HOST -> CSME.
+ * @init_notif_seq_num: seq_num of the first notification HOST -> CSME.
+ * @supported_versions: The host sends to the CSME a zero-terminated array
+ * of versions it supports.
+ *
+ * This message is sent by the host to CSME and will be answered by the
+ * %SAP_ME_MSG_START_OK message.
+ */
+struct iwl_sap_me_msg_start {
+	struct iwl_sap_me_msg_hdr hdr;
+	__le64 shared_mem;
+	__le16 init_data_seq_num;
+	__le16 init_notif_seq_num;
+	u8 supported_versions[64];
+} __packed;
+
+/**
+ * struct iwl_sap_me_msg_start_ok - used for the %SAP_ME_MSG_START_OK
+ * @hdr: See &struct iwl_sap_me_msg_hdr
+ * @init_data_seq_num: Not used.
+ * @init_notif_seq_num: Not used
+ * @supported_version: The version that will be used.
+ * @reserved: For alignment.
+ *
+ * This message is sent by CSME to the host in response to the
+ * %SAP_ME_MSG_START message.
+ */
+struct iwl_sap_me_msg_start_ok {
+	struct iwl_sap_me_msg_hdr hdr;
+	__le16 init_data_seq_num;
+	__le16 init_notif_seq_num;
+	u8 supported_version;
+	u8 reserved[3];
+} __packed;
+
+/**
+ * enum iwl_sap_msg - SAP messages
+ * @SAP_MSG_NOTIF_BOTH_WAYS_MIN: Not used.
+ * @SAP_MSG_NOTIF_PING: No payload. Solicits a response message (check-alive).
+ * @SAP_MSG_NOTIF_PONG: No payload. The response message.
+ * @SAP_MSG_NOTIF_BOTH_WAYS_MAX: Not used.
+ *
+ * @SAP_MSG_NOTIF_FROM_CSME_MIN: Not used.
+ * @SAP_MSG_NOTIF_CSME_FILTERS: TODO
+ * @SAP_MSG_NOTIF_AMT_STATE: Payload is a DW. Any non-zero value means
+ *	that CSME is enabled.
+ * @SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ: Payload is a DW. 0 means
+ *	the host will not get ownership. Any other value means the host is
+ *	the owner.
+ * @SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP: No payload.
+ * @SAP_MSG_NOTIF_TRIGGER_IP_REFRESH: No payload.
+ * @SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP: No payload.
+ * @SAP_MSG_NOTIF_NIC_OWNER: Payload is a DW. See &enum iwl_sap_nic_owner.
+ * @SAP_MSG_NOTIF_CSME_CONN_STATUS: See &struct iwl_sap_notif_conn_status.
+ * @SAP_MSG_NOTIF_NVM: See &struct iwl_sap_nvm.
+ * @SAP_MSG_NOTIF_FROM_CSME_MAX: Not used.
+ *
+ * @SAP_MSG_NOTIF_FROM_HOST_MIN: Not used.
+ * @SAP_MSG_NOTIF_BAND_SELECTION: TODO
+ * @SAP_MSG_NOTIF_RADIO_STATE: Payload is a DW.
+ *	See &enum iwl_sap_radio_state_bitmap.
+ * @SAP_MSG_NOTIF_NIC_INFO: See &struct iwl_sap_notif_host_nic_info.
+ * @SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP: No payload.
+ * @SAP_MSG_NOTIF_HOST_SUSPENDS: Payload is a DW. Bitmap described in
+ *	&enum iwl_sap_notif_host_suspends_bitmap.
+ * @SAP_MSG_NOTIF_HOST_RESUMES: Payload is a DW. 0 or 1. 1 says that
+ *	the CSME should re-initialize the init control block.
+ * @SAP_MSG_NOTIF_HOST_GOES_DOWN: No payload.
+ * @SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED: No payload.
+ * @SAP_MSG_NOTIF_COUNTRY_CODE: See &struct iwl_sap_notif_country_code.
+ * @SAP_MSG_NOTIF_HOST_LINK_UP: See &struct iwl_sap_notif_host_link_up.
+ * @SAP_MSG_NOTIF_HOST_LINK_DOWN: See &struct iwl_sap_notif_host_link_down.
+ * @SAP_MSG_NOTIF_WHO_OWNS_NIC: No payload.
+ * @SAP_MSG_NOTIF_WIFIDR_DOWN: No payload.
+ * @SAP_MSG_NOTIF_WIFIDR_UP: No payload.
+ * @SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED: No payload.
+ * @SAP_MSG_NOTIF_SAR_LIMITS: See &struct iwl_sap_notif_sar_limits.
+ * @SAP_MSG_NOTIF_GET_NVM: No payload. Triggers %SAP_MSG_NOTIF_NVM.
+ * @SAP_MSG_NOTIF_FROM_HOST_MAX: Not used.
+ *
+ * @SAP_MSG_DATA_MIN: Not used.
+ * @SAP_MSG_DATA_PACKET: Packets that passed the filters defined by
+ *	%SAP_MSG_NOTIF_CSME_FILTERS. The payload is &struct iwl_sap_hdr with
+ *	the payload of the packet immediately afterwards.
+ * @SAP_MSG_CB_DATA_PACKET: Indicates to CSME that we transmitted a specific + * packet. Used only for DHCP transmitted packets. See + * &struct iwl_sap_cb_data. + * @SAP_MSG_DATA_MAX: Not used. + */ +enum iwl_sap_msg { + SAP_MSG_NOTIF_BOTH_WAYS_MIN = 0, + SAP_MSG_NOTIF_PING = 1, + SAP_MSG_NOTIF_PONG = 2, + SAP_MSG_NOTIF_BOTH_WAYS_MAX, + + SAP_MSG_NOTIF_FROM_CSME_MIN = 500, + SAP_MSG_NOTIF_CSME_FILTERS = SAP_MSG_NOTIF_FROM_CSME_MIN, + /* 501 is deprecated */ + SAP_MSG_NOTIF_AMT_STATE = 502, + SAP_MSG_NOTIF_CSME_REPLY_TO_HOST_OWNERSHIP_REQ = 503, + SAP_MSG_NOTIF_CSME_TAKING_OWNERSHIP = 504, + SAP_MSG_NOTIF_TRIGGER_IP_REFRESH = 505, + SAP_MSG_NOTIF_CSME_CAN_RELEASE_OWNERSHIP = 506, + /* 507 is deprecated */ + /* 508 is deprecated */ + /* 509 is deprecated */ + /* 510 is deprecated */ + SAP_MSG_NOTIF_NIC_OWNER = 511, + SAP_MSG_NOTIF_CSME_CONN_STATUS = 512, + SAP_MSG_NOTIF_NVM = 513, + SAP_MSG_NOTIF_FROM_CSME_MAX, + + SAP_MSG_NOTIF_FROM_HOST_MIN = 1000, + SAP_MSG_NOTIF_BAND_SELECTION = SAP_MSG_NOTIF_FROM_HOST_MIN, + SAP_MSG_NOTIF_RADIO_STATE = 1001, + SAP_MSG_NOTIF_NIC_INFO = 1002, + SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP = 1003, + SAP_MSG_NOTIF_HOST_SUSPENDS = 1004, + SAP_MSG_NOTIF_HOST_RESUMES = 1005, + SAP_MSG_NOTIF_HOST_GOES_DOWN = 1006, + SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED = 1007, + SAP_MSG_NOTIF_COUNTRY_CODE = 1008, + SAP_MSG_NOTIF_HOST_LINK_UP = 1009, + SAP_MSG_NOTIF_HOST_LINK_DOWN = 1010, + SAP_MSG_NOTIF_WHO_OWNS_NIC = 1011, + SAP_MSG_NOTIF_WIFIDR_DOWN = 1012, + SAP_MSG_NOTIF_WIFIDR_UP = 1013, + /* 1014 is deprecated */ + SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED = 1015, + SAP_MSG_NOTIF_SAR_LIMITS = 1016, + SAP_MSG_NOTIF_GET_NVM = 1017, + SAP_MSG_NOTIF_FROM_HOST_MAX, + + SAP_MSG_DATA_MIN = 2000, + SAP_MSG_DATA_PACKET = SAP_MSG_DATA_MIN, + SAP_MSG_CB_DATA_PACKET = 2001, + SAP_MSG_DATA_MAX, +}; + +/** + * struct iwl_sap_hdr - prefixes any SAP message + * @type: See &enum iwl_sap_msg. + * @len: The length of the message (header not included). + * @seq_num: For debug. + * @payload: The payload of the message. + */ +struct iwl_sap_hdr { + __le16 type; + __le16 len; + __le32 seq_num; + u8 payload[0]; +}; + +/** + * struct iwl_sap_msg_dw - suits any DW long SAP message + * @hdr: The SAP header + * @val: The value of the DW. + */ +struct iwl_sap_msg_dw { + struct iwl_sap_hdr hdr; + __le32 val; +}; + +/** + * enum iwl_sap_nic_owner - used by %SAP_MSG_NOTIF_NIC_OWNER + * @SAP_NIC_OWNER_UNKNOWN: Not used. + * @SAP_NIC_OWNER_HOST: The host owns the NIC. + * @SAP_NIC_OWNER_ME: CSME owns the NIC. + */ +enum iwl_sap_nic_owner { + SAP_NIC_OWNER_UNKNOWN, + SAP_NIC_OWNER_HOST, + SAP_NIC_OWNER_ME, +}; + +enum iwl_sap_wifi_auth_type { + SAP_WIFI_AUTH_TYPE_OPEN = IWL_MEI_AKM_AUTH_OPEN, + SAP_WIFI_AUTH_TYPE_RSNA = IWL_MEI_AKM_AUTH_RSNA, + SAP_WIFI_AUTH_TYPE_RSNA_PSK = IWL_MEI_AKM_AUTH_RSNA_PSK, + SAP_WIFI_AUTH_TYPE_SAE = IWL_MEI_AKM_AUTH_SAE, + SAP_WIFI_AUTH_TYPE_MAX, +}; + +/** + * enum iwl_sap_wifi_cipher_alg + * @SAP_WIFI_CIPHER_ALG_NONE: TBD + * @SAP_WIFI_CIPHER_ALG_CCMP: TBD + * @SAP_WIFI_CIPHER_ALG_GCMP: TBD + * @SAP_WIFI_CIPHER_ALG_GCMP_256: TBD + */ +enum iwl_sap_wifi_cipher_alg { + SAP_WIFI_CIPHER_ALG_NONE = IWL_MEI_CIPHER_NONE, + SAP_WIFI_CIPHER_ALG_CCMP = IWL_MEI_CIPHER_CCMP, + SAP_WIFI_CIPHER_ALG_GCMP = IWL_MEI_CIPHER_GCMP, + SAP_WIFI_CIPHER_ALG_GCMP_256 = IWL_MEI_CIPHER_GCMP_256, +}; + +/** + * struct iwl_sap_notif_connection_info - nested in other structures + * @ssid_len: The length of the SSID. + * @ssid: The SSID. + * @auth_mode: The authentication mode. 
See &enum iwl_sap_wifi_auth_type. + * @pairwise_cipher: The cipher used for unicast packets. + * See &enum iwl_sap_wifi_cipher_alg. + * @channel: The channel on which we are associated. + * @band: The band on which we are associated. + * @reserved: For alignment. + * @bssid: The BSSID. + * @reserved1: For alignment. + */ +struct iwl_sap_notif_connection_info { + __le32 ssid_len; + u8 ssid[32]; + __le32 auth_mode; + __le32 pairwise_cipher; + u8 channel; + u8 band; + __le16 reserved; + u8 bssid[6]; + __le16 reserved1; +} __packed; + +/** + * enum iwl_sap_scan_request - for the scan_request field + * @SCAN_REQUEST_FILTERING: Filtering is requested. + * @SCAN_REQUEST_FAST: Fast scan is requested. + */ +enum iwl_sap_scan_request { + SCAN_REQUEST_FILTERING = 1 << 0, + SCAN_REQUEST_FAST = 1 << 1, +}; + +/** + * struct iwl_sap_notif_conn_status - payload of %SAP_MSG_NOTIF_CSME_CONN_STATUS + * @hdr: The SAP header + * @link_prot_state: Non-zero if link protection is active. + * @scan_request: See &enum iwl_sap_scan_request. + * @conn_info: Information about the connection. + */ +struct iwl_sap_notif_conn_status { + struct iwl_sap_hdr hdr; + __le32 link_prot_state; + __le32 scan_request; + struct iwl_sap_notif_connection_info conn_info; +} __packed; + +/** + * enum iwl_sap_radio_state_bitmap - used for %SAP_MSG_NOTIF_RADIO_STATE + * @SAP_SW_RFKILL_DEASSERTED: If set, SW RfKill is de-asserted + * @SAP_HW_RFKILL_DEASSERTED: If set, HW RfKill is de-asserted + * + * If both bits are set, then the radio is on. + */ +enum iwl_sap_radio_state_bitmap { + SAP_SW_RFKILL_DEASSERTED = 1 << 0, + SAP_HW_RFKILL_DEASSERTED = 1 << 1, +}; + +/** + * enum iwl_sap_notif_host_suspends_bitmap - used for %SAP_MSG_NOTIF_HOST_SUSPENDS + * @SAP_OFFER_NIC: TBD + * @SAP_FILTER_CONFIGURED: TBD + * @SAP_NLO_CONFIGURED: TBD + * @SAP_HOST_OWNS_NIC: TBD + * @SAP_LINK_PROTECTED: TBD + */ +enum iwl_sap_notif_host_suspends_bitmap { + SAP_OFFER_NIC = 1 << 0, + SAP_FILTER_CONFIGURED = 1 << 1, + SAP_NLO_CONFIGURED = 1 << 2, + SAP_HOST_OWNS_NIC = 1 << 3, + SAP_LINK_PROTECTED = 1 << 4, +}; + +/** + * struct iwl_sap_notif_country_code - payload of %SAP_MSG_NOTIF_COUNTRY_CODE + * @hdr: The SAP header + * @mcc: The country code. + * @source_id: TBD + * @reserved: For alignment. + * @diff_time: TBD + */ +struct iwl_sap_notif_country_code { + struct iwl_sap_hdr hdr; + __le16 mcc; + u8 source_id; + u8 reserved; + __le32 diff_time; +} __packed; + +/** + * struct iwl_sap_notif_host_link_up - payload of %SAP_MSG_NOTIF_HOST_LINK_UP + * @hdr: The SAP header + * @conn_info: Information about the connection. + * @colloc_channel: The collocated channel + * @colloc_band: The band of the collocated channel. + * @reserved: For alignment. + * @colloc_bssid: The collocated BSSID. + * @reserved1: For alignment. + */ +struct iwl_sap_notif_host_link_up { + struct iwl_sap_hdr hdr; + struct iwl_sap_notif_connection_info conn_info; + u8 colloc_channel; + u8 colloc_band; + __le16 reserved; + u8 colloc_bssid[6]; + __le16 reserved1; +} __packed; + +/** + * enum iwl_sap_notif_link_down_type - used in &struct iwl_sap_notif_host_link_down + * @HOST_LINK_DOWN_TYPE_NONE: TBD + * @HOST_LINK_DOWN_TYPE_TEMPORARY: TBD + * @HOST_LINK_DOWN_TYPE_LONG: TBD + */ +enum iwl_sap_notif_link_down_type { + HOST_LINK_DOWN_TYPE_NONE, + HOST_LINK_DOWN_TYPE_TEMPORARY, + HOST_LINK_DOWN_TYPE_LONG, +}; + +/** + * struct iwl_sap_notif_host_link_down - payload for %SAP_MSG_NOTIF_HOST_LINK_DOWN + * @hdr: The SAP header + * @type: See &enum iwl_sap_notif_link_down_type. 
+ * @reserved: For alignment. + * @reason_valid: If 0, ignore the next field. + * @reason: The reason of the disconnection. + */ +struct iwl_sap_notif_host_link_down { + struct iwl_sap_hdr hdr; + u8 type; + u8 reserved[2]; + u8 reason_valid; + __le32 reason; +} __packed; + +/** + * struct iwl_sap_notif_host_nic_info - payload for %SAP_MSG_NOTIF_NIC_INFO + * @hdr: The SAP header + * @mac_address: The MAC address as configured to the interface. + * @nvm_address: The MAC address as configured in the NVM. + */ +struct iwl_sap_notif_host_nic_info { + struct iwl_sap_hdr hdr; + u8 mac_address[6]; + u8 nvm_address[6]; +} __packed; + +/** + * struct iwl_sap_notif_dw - payload is a dw + * @hdr: The SAP header. + * @dw: The payload. + */ +struct iwl_sap_notif_dw { + struct iwl_sap_hdr hdr; + __le32 dw; +} __packed; + +/** + * struct iwl_sap_notif_sar_limits - payload for %SAP_MSG_NOTIF_SAR_LIMITS + * @hdr: The SAP header + * @sar_chain_info_table: Tx power limits. + */ +struct iwl_sap_notif_sar_limits { + struct iwl_sap_hdr hdr; + __le16 sar_chain_info_table[2][5]; +} __packed; + +/** + * enum iwl_sap_nvm_caps - capabilities for NVM SAP + * @SAP_NVM_CAPS_LARI_SUPPORT: Lari is supported + * @SAP_NVM_CAPS_11AX_SUPPORT: 11AX is supported + */ +enum iwl_sap_nvm_caps { + SAP_NVM_CAPS_LARI_SUPPORT = BIT(0), + SAP_NVM_CAPS_11AX_SUPPORT = BIT(1), +}; + +/** + * struct iwl_sap_nvm - payload for %SAP_MSG_NOTIF_NVM + * @hdr: The SAP header. + * @hw_addr: The MAC address + * @n_hw_addrs: The number of MAC addresses + * @reserved: For alignment. + * @radio_cfg: The radio configuration. + * @caps: See &enum iwl_sap_nvm_caps. + * @nvm_version: The version of the NVM. + * @channels: The data for each channel. + */ +struct iwl_sap_nvm { + struct iwl_sap_hdr hdr; + u8 hw_addr[6]; + u8 n_hw_addrs; + u8 reserved; + __le32 radio_cfg; + __le32 caps; + __le32 nvm_version; + __le32 channels[110]; +} __packed; + +/** + * enum iwl_sap_eth_filter_flags - used in &struct iwl_sap_eth_filter + * @SAP_ETH_FILTER_STOP: Do not process further filters. + * @SAP_ETH_FILTER_COPY: Copy the packet to the CSME. + * @SAP_ETH_FILTER_ENABLED: If false, the filter should be ignored. + */ +enum iwl_sap_eth_filter_flags { + SAP_ETH_FILTER_STOP = BIT(0), + SAP_ETH_FILTER_COPY = BIT(1), + SAP_ETH_FILTER_ENABLED = BIT(2), +}; + +/** + * struct iwl_sap_eth_filter - a L2 filter + * @mac_address: Address to filter. + * @flags: See &enum iwl_sap_eth_filter_flags. + */ +struct iwl_sap_eth_filter { + u8 mac_address[6]; + u8 flags; +} __packed; + +/** + * enum iwl_sap_flex_filter_flags - used in &struct iwl_sap_flex_filter + * @SAP_FLEX_FILTER_COPY: Pass UDP / TCP packets to CSME. + * @SAP_FLEX_FILTER_ENABLED: If false, the filter should be ignored. + * @SAP_FLEX_FILTER_IPV4: Filter requires match on the IP address as well. + * @SAP_FLEX_FILTER_IPV6: Filter requires match on the IP address as well. + * @SAP_FLEX_FILTER_TCP: Filter should be applied on TCP packets. + * @SAP_FLEX_FILTER_UDP: Filter should be applied on UDP packets. + */ +enum iwl_sap_flex_filter_flags { + SAP_FLEX_FILTER_COPY = BIT(0), + SAP_FLEX_FILTER_ENABLED = BIT(1), + SAP_FLEX_FILTER_IPV6 = BIT(2), + SAP_FLEX_FILTER_IPV4 = BIT(3), + SAP_FLEX_FILTER_TCP = BIT(4), + SAP_FLEX_FILTER_UDP = BIT(5), +}; + +/** + * struct iwl_sap_flex_filter - + * @src_port: Source port in network format. + * @dst_port: Destination port in network format. + * @flags: Flags and protocol, see &enum iwl_sap_flex_filter_flags. + * @reserved: For alignment. 
+ */ +struct iwl_sap_flex_filter { + __be16 src_port; + __be16 dst_port; + u8 flags; + u8 reserved; +} __packed; + +/** + * enum iwl_sap_ipv4_filter_flags - used in &struct iwl_sap_ipv4_filter + * @SAP_IPV4_FILTER_ICMP_PASS: Pass ICMP packets to CSME. + * @SAP_IPV4_FILTER_ICMP_COPY: Pass ICMP packets to host. + * @SAP_IPV4_FILTER_ARP_REQ_PASS: Pass ARP requests to CSME. + * @SAP_IPV4_FILTER_ARP_REQ_COPY: Pass ARP requests to host. + * @SAP_IPV4_FILTER_ARP_RESP_PASS: Pass ARP responses to CSME. + * @SAP_IPV4_FILTER_ARP_RESP_COPY: Pass ARP responses to host. + */ +enum iwl_sap_ipv4_filter_flags { + SAP_IPV4_FILTER_ICMP_PASS = BIT(0), + SAP_IPV4_FILTER_ICMP_COPY = BIT(1), + SAP_IPV4_FILTER_ARP_REQ_PASS = BIT(2), + SAP_IPV4_FILTER_ARP_REQ_COPY = BIT(3), + SAP_IPV4_FILTER_ARP_RESP_PASS = BIT(4), + SAP_IPV4_FILTER_ARP_RESP_COPY = BIT(5), +}; + +/** + * struct iwl_sap_ipv4_filter- + * @ipv4_addr: The IP address to filer. + * @flags: See &enum iwl_sap_ipv4_filter_flags. + */ +struct iwl_sap_ipv4_filter { + __be32 ipv4_addr; + __le32 flags; +} __packed; + +/** + * enum iwl_sap_ipv6_filter_flags - + * @SAP_IPV6_ADDR_FILTER_COPY: Pass packets to the host. + * @SAP_IPV6_ADDR_FILTER_ENABLED: If false, the filter should be ignored. + */ +enum iwl_sap_ipv6_filter_flags { + SAP_IPV6_ADDR_FILTER_COPY = BIT(0), + SAP_IPV6_ADDR_FILTER_ENABLED = BIT(1), +}; + +/** + * struct iwl_sap_ipv6_filter - + * @addr_lo24: Lowest 24 bits of the IPv6 address. + * @flags: See &enum iwl_sap_ipv6_filter_flags. + */ +struct iwl_sap_ipv6_filter { + u8 addr_lo24[3]; + u8 flags; +} __packed; + +/** + * enum iwl_sap_icmpv6_filter_flags - + * @SAP_ICMPV6_FILTER_ENABLED: If false, the filter should be ignored. + * @SAP_ICMPV6_FILTER_COPY: Pass packets to the host. + */ +enum iwl_sap_icmpv6_filter_flags { + SAP_ICMPV6_FILTER_ENABLED = BIT(0), + SAP_ICMPV6_FILTER_COPY = BIT(1), +}; + +/** + * enum iwl_sap_vlan_filter_flags - + * @SAP_VLAN_FILTER_VLAN_ID_MSK: TBD + * @SAP_VLAN_FILTER_ENABLED: If false, the filter should be ignored. + */ +enum iwl_sap_vlan_filter_flags { + SAP_VLAN_FILTER_VLAN_ID_MSK = 0x0FFF, + SAP_VLAN_FILTER_ENABLED = BIT(15), +}; + +/** + * struct iwl_sap_oob_filters - Out of band filters (for RX only) + * @flex_filters: Array of &struct iwl_sap_flex_filter. + * @icmpv6_flags: See &enum iwl_sap_icmpv6_filter_flags. + * @ipv6_filters: Array of &struct iwl_sap_ipv6_filter. + * @eth_filters: Array of &struct iwl_sap_eth_filter. + * @reserved: For alignment. + * @ipv4_filter: &struct iwl_sap_ipv4_filter. + * @vlan: See &enum iwl_sap_vlan_filter_flags. + */ +struct iwl_sap_oob_filters { + struct iwl_sap_flex_filter flex_filters[14]; + __le32 icmpv6_flags; + struct iwl_sap_ipv6_filter ipv6_filters[4]; + struct iwl_sap_eth_filter eth_filters[5]; + u8 reserved; + struct iwl_sap_ipv4_filter ipv4_filter; + __le16 vlan[4]; +} __packed; + +/** + * struct iwl_sap_csme_filters - payload of %SAP_MSG_NOTIF_CSME_FILTERS + * @hdr: The SAP header. + * @mode: Not used. + * @mac_address: Not used. + * @reserved: For alignment. + * @cbfilters: Not used. + * @filters: Out of band filters. + */ +struct iwl_sap_csme_filters { + struct iwl_sap_hdr hdr; + __le32 mode; + u8 mac_address[6]; + __le16 reserved; + u8 cbfilters[1728]; + struct iwl_sap_oob_filters filters; +} __packed; + +#define CB_TX_DHCP_FILT_IDX 30 +/** + * struct iwl_sap_cb_data - header to be added for transmitted packets. + * @hdr: The SAP header. + * @reserved: Not used. + * @to_me_filt_status: The filter that matches. 
Bit %CB_TX_DHCP_FILT_IDX should + * be set for DHCP (the only packet that uses this header). + * @reserved2: Not used. + * @data_len: The length of the payload. + * @payload: The payload of the transmitted packet. + */ +struct iwl_sap_cb_data { + struct iwl_sap_hdr hdr; + __le32 reserved[7]; + __le32 to_me_filt_status; + __le32 reserved2; + __le32 data_len; + u8 payload[]; +}; + +#endif /* __sap_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h b/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h new file mode 100644 index 000000000000..83639c6225ca --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/trace-data.h @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2021 Intel Corporation + */ + +#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + +#define trace_iwlmei_sap_data(...) + +#else + +#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA) || defined(TRACE_HEADER_MULTI_READ) + +#ifndef __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA +enum iwl_sap_data_trace_type { + IWL_SAP_RX_DATA_TO_AIR, + IWL_SAP_TX_DATA_FROM_AIR, + IWL_SAP_RX_DATA_DROPPED_FROM_AIR, + IWL_SAP_TX_DHCP, +}; + +static inline size_t +iwlmei_sap_data_offset(enum iwl_sap_data_trace_type trace_type) +{ + switch (trace_type) { + case IWL_SAP_RX_DATA_TO_AIR: + return 0; + case IWL_SAP_TX_DATA_FROM_AIR: + case IWL_SAP_RX_DATA_DROPPED_FROM_AIR: + return sizeof(struct iwl_sap_hdr); + case IWL_SAP_TX_DHCP: + return sizeof(struct iwl_sap_cb_data); + default: + WARN_ON_ONCE(1); + } + + return 0; +} +#endif + +#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA + +#include <linux/tracepoint.h> +#include <linux/skbuff.h> +#include "sap.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlmei_sap_data + +TRACE_EVENT(iwlmei_sap_data, + TP_PROTO(const struct sk_buff *skb, + enum iwl_sap_data_trace_type trace_type), + TP_ARGS(skb, trace_type), + TP_STRUCT__entry( + __dynamic_array(u8, data, + skb->len - iwlmei_sap_data_offset(trace_type)) + __field(u32, trace_type) + ), + TP_fast_assign( + size_t offset = iwlmei_sap_data_offset(trace_type); + __entry->trace_type = trace_type; + skb_copy_bits(skb, offset, __get_dynamic_array(data), + skb->len - offset); + ), + TP_printk("sap_data:trace_type %d len %d", + __entry->trace_type, __get_dynamic_array_len(data)) +); + +/* + * If you add something here, add a stub in case + * !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + */ + +#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_DATA */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace-data +#include <trace/define_trace.h> + +#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace.c b/drivers/net/wireless/intel/iwlwifi/mei/trace.c new file mode 100644 index 000000000000..47ac32ef9f69 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/trace.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#include <linux/module.h> + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ + +#define CREATE_TRACE_POINTS +#include "trace.h" +#include "trace-data.h" + +#endif /* __CHECKER__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/trace.h b/drivers/net/wireless/intel/iwlwifi/mei/trace.h new file mode 100644 index 000000000000..45ecb22ec84a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mei/trace.h @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright(c) 2021 Intel Corporation + */ + +#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + +#define trace_iwlmei_sap_cmd(...) +#define trace_iwlmei_me_msg(...) + +#else + +#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlmei_sap_cmd + +#include "mei/sap.h" + +TRACE_EVENT(iwlmei_sap_cmd, + TP_PROTO(const struct iwl_sap_hdr *sap_cmd, bool tx), + TP_ARGS(sap_cmd, tx), + TP_STRUCT__entry( + __dynamic_array(u8, cmd, + le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd)) + __field(u8, tx) + __field(u16, type) + __field(u16, len) + __field(u32, seq) + ), + TP_fast_assign( + memcpy(__get_dynamic_array(cmd), sap_cmd, + le16_to_cpu(sap_cmd->len) + sizeof(*sap_cmd)); + __entry->tx = tx; + __entry->type = le16_to_cpu(sap_cmd->type); + __entry->len = le16_to_cpu(sap_cmd->len); + __entry->seq = le32_to_cpu(sap_cmd->seq_num); + ), + TP_printk("sap_cmd %s: type %d len %d seq %d", __entry->tx ? "Tx" : "Rx", + __entry->type, __entry->len, __entry->seq) +); + +TRACE_EVENT(iwlmei_me_msg, + TP_PROTO(const struct iwl_sap_me_msg_hdr *hdr, bool tx), + TP_ARGS(hdr, tx), + TP_STRUCT__entry( + __field(u8, type) + __field(u8, tx) + __field(u32, seq_num) + ), + TP_fast_assign( + __entry->type = le32_to_cpu(hdr->type); + __entry->seq_num = le32_to_cpu(hdr->seq_num); + __entry->tx = tx; + ), + TP_printk("ME message: %s: type %d seq %d", __entry->tx ? "Tx" : "Rx", + __entry->type, __entry->seq_num) +); + +/* + * If you add something here, add a stub in case + * !defined(CONFIG_IWLWIFI_DEVICE_TRACING) + */ + +#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI_SAP_CMD */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> + +#endif /* CONFIG_IWLWIFI_DEVICE_TRACING */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 75fc2d935e5d..11e814b7cad0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -10,5 +10,6 @@ iwlmvm-y += rfi.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwlmvm-$(CONFIG_PM) += d3.o +iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 9d0d01f27d92..c604f9f39b24 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -107,7 +107,7 @@ #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true -#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 +#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20 #define IWL_MVM_USE_NSSN_SYNC 0 #define IWL_MVM_PHY_FILTER_CHAIN_A 0 #define IWL_MVM_PHY_FILTER_CHAIN_B 0 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index a19f646a324f..b400867e94f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1391,6 +1391,13 @@ struct iwl_wowlan_status_data { u8 tid_tear_down; struct { + /* including RX MIC key for TKIP */ + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + } gtk; + + struct { /* * We store both the TKIP and AES representations * coming from the firmware because we decode the @@ -1400,11 +1407,15 @@ struct iwl_wowlan_status_data { struct { struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; } tkip, aes; - /* including RX MIC key for TKIP */ - u8 key[WOWLAN_KEY_MAX_SIZE]; - u8 len; - u8 flags; - } gtk; + + /* + * We use -1 for when we have valid data but don't know + * the key ID from firmware, and thus it needs to be + * installed with the last key (depending on rekeying). 
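[Editor's note, not part of this commit. The -1 sentinel described above is consumed later in this same patch, in iwl_mvm_set_key_rx_seq(): an entry whose key ID is unknown is only applied to the key that ended up installed last:

	/* handle the case where we didn't know the key ID - last key only */
	if (status->gtk_seq[i].key_id == -1 &&
	    (!status->num_of_gtk_rekeys || installed))
		iwl_mvm_set_key_rx_seq_idx(key, status, i);
]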
+ */ + s8 key_id; + bool valid; + } gtk_seq[2]; struct { /* Same as above */ @@ -1556,12 +1567,10 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, kfree_skb(pkt); } -static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, - struct ieee80211_key_seq *seq) +static void iwl_mvm_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq) { - u64 pn; + u64 pn = le64_to_cpu(le_pn); - pn = le64_to_cpu(sc->pn); seq->ccmp.pn[0] = pn >> 40; seq->ccmp.pn[1] = pn >> 32; seq->ccmp.pn[2] = pn >> 24; @@ -1570,6 +1579,20 @@ static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, seq->ccmp.pn[5] = pn; } +static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, + struct ieee80211_key_seq *seq) +{ + iwl_mvm_le64_to_aes_seq(sc->pn, seq); +} + +static void iwl_mvm_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq) +{ + u64 pn = le64_to_cpu(le_pn); + + seq->tkip.iv16 = (u16)pn; + seq->tkip.iv32 = (u32)(pn >> 16); +} + static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, struct ieee80211_key_seq *seq) { @@ -1630,10 +1653,12 @@ static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status, /* GTK RX counters */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i], - &status->gtk.tkip.seq[i]); + &status->gtk_seq[0].tkip.seq[i]); iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i], - &status->gtk.aes.seq[i]); + &status->gtk_seq[0].aes.seq[i]); } + status->gtk_seq[0].valid = true; + status->gtk_seq[0].key_id = -1; /* PTK TX counter */ status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) | @@ -1649,24 +1674,103 @@ static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status, } } -static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, - struct ieee80211_key_conf *key, - struct iwl_wowlan_status_data *status) +static void +iwl_mvm_convert_key_counters_v5_gtk_seq(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_all_rsc_tsc_v5 *sc, + unsigned int idx, unsigned int key_id) +{ + int tid; + + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + iwl_mvm_le64_to_tkip_seq(sc->mcast_rsc[idx][tid], + &status->gtk_seq[idx].tkip.seq[tid]); + iwl_mvm_le64_to_aes_seq(sc->mcast_rsc[idx][tid], + &status->gtk_seq[idx].aes.seq[tid]); + } + + status->gtk_seq[idx].valid = true; + status->gtk_seq[idx].key_id = key_id; +} + +static void +iwl_mvm_convert_key_counters_v5(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_all_rsc_tsc_v5 *sc) +{ + int i, tid; + + BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT); + BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC); + BUILD_BUG_ON(ARRAY_SIZE(sc->mcast_rsc) != ARRAY_SIZE(status->gtk_seq)); + + /* GTK RX counters */ + for (i = 0; i < ARRAY_SIZE(sc->mcast_key_id_map); i++) { + u8 entry = sc->mcast_key_id_map[i]; + + if (entry < ARRAY_SIZE(sc->mcast_rsc)) + iwl_mvm_convert_key_counters_v5_gtk_seq(status, sc, + entry, i); + } + + /* PTK TX counters not needed, assigned in device */ + + /* PTK RX counters */ + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + iwl_mvm_le64_to_tkip_seq(sc->ucast_rsc[tid], + &status->ptk.tkip.seq[tid]); + iwl_mvm_le64_to_aes_seq(sc->ucast_rsc[tid], + &status->ptk.aes.seq[tid]); + } +} + +static void iwl_mvm_set_key_rx_seq_idx(struct ieee80211_key_conf *key, + struct iwl_wowlan_status_data *status, + int idx) { switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: - iwl_mvm_set_key_rx_seq_tids(key, status->gtk.aes.seq); + iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].aes.seq); break; case 
WLAN_CIPHER_SUITE_TKIP: - iwl_mvm_set_key_rx_seq_tids(key, status->gtk.tkip.seq); + iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].tkip.seq); break; default: WARN_ON(1); } } +static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, + struct iwl_wowlan_status_data *status, + bool installed) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(status->gtk_seq); i++) { + if (!status->gtk_seq[i].valid) + continue; + + /* Handle the case where we know the key ID */ + if (status->gtk_seq[i].key_id == key->keyidx) { + s8 new_key_id = -1; + + if (status->num_of_gtk_rekeys) + new_key_id = status->gtk.flags & + IWL_WOWLAN_GTK_IDX_MASK; + + /* Don't install a new key's value to an old key */ + if (new_key_id != key->keyidx) + iwl_mvm_set_key_rx_seq_idx(key, status, i); + continue; + } + + /* handle the case where we didn't, last key only */ + if (status->gtk_seq[i].key_id == -1 && + (!status->num_of_gtk_rekeys || installed)) + iwl_mvm_set_key_rx_seq_idx(key, status, i); + } +} + struct iwl_mvm_d3_gtk_iter_data { struct iwl_mvm *mvm; struct iwl_wowlan_status_data *status; @@ -1740,8 +1844,9 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, if (data->status->num_of_gtk_rekeys) ieee80211_remove_key(key); - else if (data->last_gtk == key) - iwl_mvm_set_key_rx_seq(data->mvm, key, data->status); + + if (data->last_gtk == key) + iwl_mvm_set_key_rx_seq(key, data->status, false); } static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, @@ -1825,7 +1930,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, key = ieee80211_gtk_rekey_add(vif, &conf.conf); if (IS_ERR(key)) return false; - iwl_mvm_set_key_rx_seq(mvm, key, status); + iwl_mvm_set_key_rx_seq(key, status, true); replay_ctr = cpu_to_be64(status->replay_ctr); @@ -1893,9 +1998,10 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ iwl_mvm_parse_wowlan_status_common(v6) iwl_mvm_parse_wowlan_status_common(v7) iwl_mvm_parse_wowlan_status_common(v9) +iwl_mvm_parse_wowlan_status_common(v12) -static void iwl_mvm_convert_gtk(struct iwl_wowlan_status_data *status, - struct iwl_wowlan_gtk_status *data) +static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v2 *data) { BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key)); BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + @@ -1913,6 +2019,26 @@ static void iwl_mvm_convert_gtk(struct iwl_wowlan_status_data *status, data->tkip_mic_key, sizeof(data->tkip_mic_key)); } +static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v3 *data) +{ + /* The parts we need are identical in v2 and v3 */ +#define CHECK(_f) do { \ + BUILD_BUG_ON(offsetof(struct iwl_wowlan_gtk_status_v2, _f) != \ + offsetof(struct iwl_wowlan_gtk_status_v3, _f)); \ + BUILD_BUG_ON(offsetofend(struct iwl_wowlan_gtk_status_v2, _f) !=\ + offsetofend(struct iwl_wowlan_gtk_status_v3, _f)); \ +} while (0) + + CHECK(key); + CHECK(key_len); + CHECK(key_flags); + CHECK(tkip_mic_key); +#undef CHECK + + iwl_mvm_convert_gtk_v2(status, (void *)data); +} + static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, struct iwl_wowlan_igtk_status *data) { @@ -2012,7 +2138,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc); - iwl_mvm_convert_gtk(status, &v7->gtk[0]); + iwl_mvm_convert_gtk_v2(status, &v7->gtk[0]); iwl_mvm_convert_igtk(status, &v7->igtk[0]); } else if (notif_ver == 9 || notif_ver 
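The CHECK() macro above is a compile-time layout proof: if every shared field of the v2 and v3 GTK status structs has the same offset and the same end, the v3 pointer can safely be handed to the v2 converter with a cast. A minimal sketch of the same pattern, with invented struct names:

struct demo_status_v2 {
	u8 key[32];
	u8 key_len;
};

struct demo_status_v3 {
	u8 key[32];
	u8 key_len;
	__le32 extra;	/* new in v3, appended after the shared fields */
};

static void demo_parse_v2(struct demo_status_v2 *v2);

#define DEMO_CHECK_FIELD(_f) do {					\
	BUILD_BUG_ON(offsetof(struct demo_status_v2, _f) !=		\
		     offsetof(struct demo_status_v3, _f));		\
	BUILD_BUG_ON(offsetofend(struct demo_status_v2, _f) !=		\
		     offsetofend(struct demo_status_v3, _f));		\
} while (0)

static void demo_parse_v3(struct demo_status_v3 *v3)
{
	DEMO_CHECK_FIELD(key);
	DEMO_CHECK_FIELD(key_len);
	/* layout proven identical, so the v2 parser can be reused */
	demo_parse_v2((struct demo_status_v2 *)v3);
}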
== 10 || notif_ver == 11) { struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; @@ -2025,10 +2151,22 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc); - iwl_mvm_convert_gtk(status, &v9->gtk[0]); + iwl_mvm_convert_gtk_v2(status, &v9->gtk[0]); iwl_mvm_convert_igtk(status, &v9->igtk[0]); status->tid_tear_down = v9->tid_tear_down; + } else if (notif_ver == 12) { + struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data; + + status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len); + if (IS_ERR(status)) + goto out_free_resp; + + iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, &v12->gtk[0]); + iwl_mvm_convert_igtk(status, &v12->igtk[0]); + + status->tid_tear_down = v12->tid_tear_down; } else { IWL_ERR(mvm, "Firmware advertises unknown WoWLAN status response %d!\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index ff66001d507e..fb4920b01dbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1022,6 +1022,11 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, if (mvm->fw_restart >= 0) mvm->fw_restart++; + if (count == 6 && !strcmp(buf, "nolog\n")) { + set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); + set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status); + } + /* take the return value to make compiler happy - it will fail anyway */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LONG_GROUP, REPLY_ERROR), @@ -1038,6 +1043,9 @@ static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, if (!iwl_mvm_firmware_running(mvm)) return -EIO; + if (count == 6 && !strcmp(buf, "nolog\n")) + set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); + iwl_force_nmi(mvm->trans); return count; @@ -2064,6 +2072,7 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 949fb790f8fb..628aee634b2a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -511,7 +511,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (sta->mfp) + if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) FTM_PUT_FLAG(PMF); rcu_read_unlock(); @@ -1066,7 +1066,7 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; - rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100; + rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); IWL_DEBUG_INFO(mvm, "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 
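The ftm-initiator hunk above swaps a plain s64 division by 100 for div_s64(): on 32-bit targets a 64-bit '/' can be emitted as a call to the compiler helper __divdi3, which the kernel does not link against, so the explicit <linux/math64.h> helpers are required. A sketch of the same smoothing step, with invented parameter names:

#include <linux/math64.h>

static s64 demo_smooth_rtt(s64 rtt, s64 prev_avg, int alpha)
{
	/* alpha percent of the new sample, the rest from history */
	return div_s64(alpha * rtt + (100 - alpha) * prev_avg, 100);
}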
73a82f07dc59..083f86fa5017 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -15,7 +15,7 @@ #include "fw/api/datapath.h" #include "fw/api/phy.h" #include "fw/api/config.h" -#include "fw/api/soc.h" +#include "fw/api/system.h" #include "fw/api/alive.h" #include "fw/api/binding.h" #include "fw/api/cmdhdr.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 863fec150e53..6f4690e56a46 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -12,8 +12,6 @@ #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-debug.h" -#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ -#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ #include "iwl-prph.h" #include "fw/acpi.h" #include "fw/pnvm.h" @@ -32,6 +30,9 @@ #define IWL_PPAG_MASK 3 #define IWL_PPAG_ETSI_MASK BIT(0) +#define IWL_TAS_US_MCC 0x5553 +#define IWL_TAS_CANADA_MCC 0x4341 + struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -123,13 +124,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_lmac_alive *lmac2 = NULL; u16 status; u32 lmac_error_event_table, umac_error_table; + u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + UCODE_ALIVE_NTFY, 0); /* * For v5 and above, we can check the version, for older * versions we need to check the size. */ - if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, - UCODE_ALIVE_NTFY, 0) == 5) { + if (version == 5 || version == 6) { + /* v5 and v6 are compatible (only IMR addition) */ struct iwl_alive_ntf_v5 *palive; if (pkt_len < sizeof(*palive)) @@ -516,7 +519,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D); } } - #else /* CONFIG_ACPI */ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, @@ -525,6 +527,49 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, } #endif /* CONFIG_ACPI */ +#if defined(CONFIG_ACPI) && defined(CONFIG_EFI) +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + u8 cmd_ver; + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + SAR_OFFSET_MAPPING_TABLE_CMD), + .flags = 0, + .data[0] = &mvm->fwrt.sgom_table, + .len[0] = sizeof(mvm->fwrt.sgom_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!mvm->fwrt.sgom_enabled) { + IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); + return 0; + } + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP, + SAR_OFFSET_MAPPING_TABLE_CMD, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != 2) { + IWL_DEBUG_RADIO(mvm, "command version is unsupported. 
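iwl_mvm_sgom_init() above is a compact example of a zero-copy host command: .data[0] points straight at the runtime SGOM table and IWL_HCMD_DFL_NOCOPY tells the transport layer to map that buffer for DMA instead of copying it into the command queue entry, so the buffer must stay valid until the command completes. A hedged sketch of the same shape (helper name invented):

static int demo_send_table(struct iwl_mvm *mvm, const void *table, u16 len)
{
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      SAR_OFFSET_MAPPING_TABLE_CMD),
		.data[0] = table,	/* not copied by the layer below */
		.len[0] = len,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}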
version = %d\n", + cmd_ver); + return 0; + } + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); + + return ret; +} +#else + +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + return 0; +} +#endif + static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; @@ -757,6 +802,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) if (ret) return ret; + iwl_mei_set_power_limit(per_chain); + IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } @@ -820,6 +867,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) u16 len; u32 n_bands; u32 n_profiles; + u32 sk = 0; int ret; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD, @@ -879,19 +927,26 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) if (ret) return 0; + /* Only set to South Korea if the table revision is 1 */ + if (mvm->fwrt.geo_rev == 1) + sk = 1; + /* - * Set the revision on versions that contain it. + * Set the table_revision to South Korea (1) or not (0). The + * element name is misleading, as it doesn't contain the table + * revision number, but whether the South Korea variation + * should be used. * This must be done after calling iwl_sar_geo_init(). */ if (cmd_ver == 5) - cmd.v5.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v5.table_revision = cpu_to_le32(sk); else if (cmd_ver == 4) - cmd.v4.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v4.table_revision = cpu_to_le32(sk); else if (cmd_ver == 3) - cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v3.table_revision = cpu_to_le32(sk); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) - cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v2.table_revision = cpu_to_le32(sk); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, @@ -904,13 +959,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) union acpi_object *wifi_pkg, *data, *flags; int i, j, ret, tbl_rev, num_sub_bands; int idx = 2; - s8 *gain; - /* - * The 'flags' field is the same in v1 and in v2 so we can just - * use v1 to access it. 
- */ - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + mvm->fwrt.ppag_flags = 0; data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) @@ -922,8 +972,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) if (!IS_ERR(wifi_pkg)) { if (tbl_rev == 1 || tbl_rev == 2) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = mvm->fwrt.ppag_table.v2.gain[0]; - mvm->fwrt.ppag_ver = tbl_rev; IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=%d)\n", tbl_rev); @@ -943,8 +991,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) goto out_free; } num_sub_bands = IWL_NUM_SUB_BANDS_V1; - gain = mvm->fwrt.ppag_table.v1.gain[0]; - mvm->fwrt.ppag_ver = 0; IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n"); goto read_table; } @@ -952,6 +998,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) goto out_free; read_table: + mvm->fwrt.ppag_ver = tbl_rev; flags = &wifi_pkg->package.elements[1]; if (flags->type != ACPI_TYPE_INTEGER) { @@ -959,10 +1006,9 @@ read_table: goto out_free; } - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(flags->integer.value & - IWL_PPAG_MASK); + mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK; - if (!mvm->fwrt.ppag_table.v1.flags) { + if (!mvm->fwrt.ppag_flags) { ret = 0; goto out_free; } @@ -982,15 +1028,15 @@ read_table: goto out_free; } - gain[i * num_sub_bands + j] = ent->integer.value; + mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value; if ((j == 0 && - (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB || - gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) || + (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB || + mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) || (j != 0 && - (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB || - gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) { - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB || + mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) { + mvm->fwrt.ppag_flags = 0; ret = -EINVAL; goto out_free; } @@ -1005,6 +1051,7 @@ out_free: int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { + union iwl_ppag_table_cmd cmd; u8 cmd_ver; int i, j, ret, num_sub_bands, cmd_size; s8 *gain; @@ -1014,37 +1061,39 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) "PPAG capability not supported by FW, command not sent.\n"); return 0; } - if (!mvm->fwrt.ppag_table.v1.flags) { + if (!mvm->fwrt.ppag_flags) { IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n"); return 0; } + /* The 'flags' field is the same in v1 and in v2 so we can just + * use v1 to access it. 
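The read_table loop above range-checks every PPAG gain as it is copied out of ACPI: sub-band 0 is the low band and is held to the ACPI_PPAG_*_LB limits, all other sub-bands to the *_HB limits, and a single out-of-range entry clears ppag_flags so the whole table is rejected. The predicate in isolation, as a sketch:

static bool demo_ppag_gain_valid(int subband, s8 gain)
{
	if (subband == 0)	/* low band */
		return gain <= ACPI_PPAG_MAX_LB && gain >= ACPI_PPAG_MIN_LB;

	/* all remaining sub-bands are high band */
	return gain <= ACPI_PPAG_MAX_HB && gain >= ACPI_PPAG_MIN_HB;
}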
+ */ + cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags); cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 1) { num_sub_bands = IWL_NUM_SUB_BANDS_V1; - gain = mvm->fwrt.ppag_table.v1.gain[0]; - cmd_size = sizeof(mvm->fwrt.ppag_table.v1); + gain = cmd.v1.gain[0]; + cmd_size = sizeof(cmd.v1); if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) { IWL_DEBUG_RADIO(mvm, "PPAG table rev is %d but FW supports v1, sending truncated table\n", mvm->fwrt.ppag_ver); - mvm->fwrt.ppag_table.v1.flags &= - cpu_to_le32(IWL_PPAG_ETSI_MASK); + cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); } } else if (cmd_ver == 2 || cmd_ver == 3) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = mvm->fwrt.ppag_table.v2.gain[0]; - cmd_size = sizeof(mvm->fwrt.ppag_table.v2); + gain = cmd.v2.gain[0]; + cmd_size = sizeof(cmd.v2); if (mvm->fwrt.ppag_ver == 0) { IWL_DEBUG_RADIO(mvm, "PPAG table is v1 but FW supports v2, sending padded table\n"); } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) { IWL_DEBUG_RADIO(mvm, "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); - mvm->fwrt.ppag_table.v1.flags &= - cpu_to_le32(IWL_PPAG_ETSI_MASK); + cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); } } else { IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n"); @@ -1053,6 +1102,8 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { for (j = 0; j < num_sub_bands; j++) { + gain[i * num_sub_bands + j] = + mvm->fwrt.ppag_chains[i].subbands[j]; IWL_DEBUG_RADIO(mvm, "PPAG table: chain[%d] band[%d]: gain = %d\n", i, j, gain[i * num_sub_bands + j]); @@ -1061,7 +1112,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), - 0, cmd_size, &mvm->fwrt.ppag_table); + 0, cmd_size, &cmd); if (ret < 0) IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", ret); @@ -1100,18 +1151,63 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling PPAG.\n", dmi_get_system_info(DMI_SYS_VENDOR)); - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + mvm->fwrt.ppag_flags = 0; return 0; } return iwl_mvm_ppag_send_cmd(mvm); } +static const struct dmi_system_id dmi_tas_approved_list[] = { + { .ident = "HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + }, + }, + { .ident = "SAMSUNG", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), + }, + }, + { .ident = "LENOVO", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + + /* keep last */ + {} +}; + +static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc) +{ + int i; + u32 size = le32_to_cpu(*le_size); + + /* Verify that there is room for another country */ + if (size >= IWL_TAS_BLOCK_LIST_MAX) + return false; + + for (i = 0; i < size; i++) { + if (list[i] == cpu_to_le32(mcc)) + return true; + } + + list[size++] = cpu_to_le32(mcc); + *le_size = cpu_to_le32(size); + return true; +} + static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { int ret; - struct iwl_tas_config_cmd cmd = {}; - int list_size; + struct iwl_tas_config_cmd_v3 cmd = {}; + int cmd_size; BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) < APCI_WTAS_BLACK_LIST_MAX); @@ -1121,7 +1217,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) 
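The IWL_TAS_US_MCC (0x5553) and IWL_TAS_CANADA_MCC (0x4341) constants defined earlier in fw.c are simply the two-letter country codes packed as ASCII, high byte first, which is what iwl_mvm_add_to_tas_block_list() appends when the platform is not on the DMI allowlist. A tiny helper, not in the patch, that makes the encoding explicit:

static u16 demo_mcc_from_str(const char *cc)
{
	/* "US" -> ('U' << 8) | 'S' == 0x5553, "CA" -> 0x4341 */
	return ((u16)cc[0] << 8) | cc[1];
}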
return; } - ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.block_list_array, &list_size); + ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "TAS table invalid or unavailable. (%d)\n", @@ -1129,15 +1225,32 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) return; } - if (list_size < 0) + if (ret == 0) return; - /* list size if TAS enabled can only be non-negative */ - cmd.block_list_size = cpu_to_le32((u32)list_size); + if (!dmi_check_system(dmi_tas_approved_list)) { + IWL_DEBUG_RADIO(mvm, + "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", + dmi_get_system_info(DMI_SYS_VENDOR)); + if ((!iwl_mvm_add_to_tas_block_list(cmd.block_list_array, + &cmd.block_list_size, IWL_TAS_US_MCC)) || + (!iwl_mvm_add_to_tas_block_list(cmd.block_list_array, + &cmd.block_list_size, IWL_TAS_CANADA_MCC))) { + IWL_DEBUG_RADIO(mvm, + "Unable to add US/Canada to TAS block list, disabling TAS\n"); + return; + } + } + + cmd_size = iwl_fw_lookup_cmd_ver(mvm->fw, REGULATORY_AND_NVM_GROUP, + TAS_CONFIG, + IWL_FW_CMD_VER_UNKNOWN) < 3 ? + sizeof(struct iwl_tas_config_cmd_v2) : + sizeof(struct iwl_tas_config_cmd_v3); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG), - 0, sizeof(cmd), &cmd); + 0, cmd_size, &cmd); if (ret < 0) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } @@ -1336,6 +1449,7 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) { } + #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) @@ -1401,7 +1515,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm); - WARN_ON(!mvm->nvm_data); ret = iwl_run_init_mvm_ucode(mvm); if (ret) { @@ -1631,6 +1744,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) else if (ret < 0) goto error; + ret = iwl_mvm_sgom_init(mvm); + if (ret) + goto error; + iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); @@ -1705,20 +1822,6 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) return ret; } -void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; - u32 flags = le32_to_cpu(card_state_notif->flags); - - IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On", - (flags & CT_KILL_CARD_DISABLED) ? 
- "Reached" : "Not reached"); -} - void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 9fb9c7dad314..65f4fe3ef504 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -16,6 +16,7 @@ #include <net/ieee80211_radiotap.h> #include <net/tcp.h> +#include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" @@ -190,6 +191,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", PTR_ERR_OR_ZERO(resp)); + resp = NULL; goto out; } @@ -211,7 +213,6 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, __le16_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; - kfree(resp); if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", PTR_ERR_OR_ZERO(regd)); @@ -223,7 +224,10 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, mvm->lar_regdom_set = true; mvm->mcc_src = src_id; + iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); + out: + kfree(resp); return regd; } @@ -637,14 +641,21 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } if (iwl_mvm_is_oce_supported(mvm)) { + u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + IWL_ALWAYS_LONG_GROUP, + SCAN_REQ_UMAC, 0); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); wiphy_ext_feature_set(hw->wiphy, - NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); - wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); + + /* Old firmware also supports probe deferral and suppression */ + if (scan_ver < 15) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); } if (mvm->nvm_data->sku_cap_11ax_enable && @@ -706,8 +717,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->netdev_features |= mvm->cfg->features; if (!iwl_mvm_is_csum_supported(mvm)) - hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | - NETIF_F_RXCSUM); + hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, @@ -717,6 +727,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); + iwl_mvm_vendor_cmds_register(mvm); + hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); @@ -1083,6 +1095,27 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); + ret = iwl_mvm_mei_get_ownership(mvm); + if (ret) + return ret; + + if (mvm->mei_nvm_data) { + /* We got the NIC, we can now free the MEI NVM data */ + kfree(mvm->mei_nvm_data); + mvm->mei_nvm_data = NULL; + + /* + * We can't free the nvm_data we allocated based on the SAP + * data because we registered to cfg80211 with the channels + * allocated on mvm->nvm_data. 
Keep a pointer in temp_nvm_data + * just in order to be able free it later. + * NULLify nvm_data so that we will read the NVM from the + * firmware this time. + */ + mvm->temp_nvm_data = mvm->nvm_data; + mvm->nvm_data = NULL; + } + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART @@ -1117,11 +1150,34 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + int retry, max_retry = 0; mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_start(mvm); + + /* we are starting the mac not in error flow, and restart is enabled */ + if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && + iwlwifi_mod_params.fw_restart) { + max_retry = IWL_MAX_INIT_RETRY; + /* + * This will prevent mac80211 recovery flows to trigger during + * init failures + */ + set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + } + + for (retry = 0; retry <= max_retry; retry++) { + ret = __iwl_mvm_mac_start(mvm); + if (!ret) + break; + + IWL_ERR(mvm, "mac start retry %d\n", retry); + } + clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + mutex_unlock(&mvm->mutex); + iwl_mvm_mei_set_sw_rfkill_state(mvm); + return ret; } @@ -1239,6 +1295,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) */ flush_work(&mvm->roc_done_wk); + iwl_mvm_mei_set_sw_rfkill_state(mvm); + mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); @@ -1509,6 +1567,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvm->monitor_on = true; iwl_mvm_vif_dbgfs_register(mvm, vif); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + !mvm->csme_vif && mvm->mei_registered) { + iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); + iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); + mvm->csme_vif = vif; + } + goto out_unlock; out_unbind: @@ -1561,6 +1628,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + if (vif == mvm->csme_vif) { + iwl_mei_set_netdev(NULL); + mvm->csme_vif = NULL; + } + probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); @@ -1666,6 +1738,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; + int ret; lockdep_assert_held(&mvm->mutex); @@ -1675,6 +1748,22 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); + + /* + * Send a (synchronous) ech command so that we wait for the + * multiple asynchronous MCAST_FILTER_CMD commands sent by + * the interface iterator. Otherwise, we might get here over + * and over again (by userspace just sending a lot of these) + * and the CPU can send them faster than the firmware can + * process them. + * Note that the CPU is still faster - but with this we'll + * actually send fewer commands overall because the CPU will + * not schedule the work in mac80211 as frequently if it's + * still running when rescheduled (possibly multiple times). 
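iwl_mvm_mac_start() above gains a bounded retry loop: when the start is not itself part of an error flow and fw_restart is enabled, IWL_MVM_STATUS_STARTING is set so mac80211's recovery machinery stays out of the way while up to IWL_MAX_INIT_RETRY synchronous retries run. The control flow in isolation, as a sketch:

static int demo_start_with_retry(struct iwl_mvm *mvm, int max_retry)
{
	int retry, ret = 0;

	set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);

	for (retry = 0; retry <= max_retry; retry++) {
		ret = __iwl_mvm_mac_start(mvm);
		if (!ret)
			break;

		IWL_ERR(mvm, "mac start retry %d\n", retry);
	}

	clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
	return ret;
}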
+ */ + ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); + if (ret) + IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, @@ -2126,24 +2215,24 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, } flags |= STA_CTXT_HE_PACKET_EXT; - } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] & - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) != - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) { + } else if (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK) + != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) { int low_th = -1; int high_th = -1; /* Take the PPE thresholds from the nominal padding info */ - switch (sta->he_cap.he_cap_elem.phy_cap_info[9] & - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: + switch (u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) { + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_NONE; break; - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: low_th = IWL_HE_PKT_EXT_BPSK; high_th = IWL_HE_PKT_EXT_NONE; break; - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_BPSK; break; @@ -2371,6 +2460,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IEEE80211_SMPS_DYNAMIC); } } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { + iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. @@ -3107,6 +3197,69 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, } } +static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_sta *mvm_sta) +{ +#if IS_ENABLED(CONFIG_IWLMEI) + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mei_conn_info conn_info = { + .ssid_len = vif->bss_conf.ssid_len, + .channel = vif->bss_conf.chandef.chan->hw_value, + }; + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + if (!mvm->mei_registered) + return; + + switch (mvm_sta->pairwise_cipher) { + case WLAN_CIPHER_SUITE_CCMP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; + break; + case 0: + /* open profile */ + break; + default: + /* cipher not supported, don't send anything to iwlmei */ + return; + } + + switch (mvmvif->rekey_data.akm) { + case WLAN_AKM_SUITE_SAE & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; + break; + case WLAN_AKM_SUITE_PSK & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; + break; + case WLAN_AKM_SUITE_8021X & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; + break; + case 0: + /* open profile */ + conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; + break; + default: + /* auth method / AKM not supported */ + /* TODO: All the FT vesions of these? 
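The rekey_data.akm comparisons above rely on how cfg80211 encodes AKM suites: each WLAN_AKM_SUITE_* value is (OUI << 8) | type with OUI 00-0F-AC, so masking with 0xff leaves only the one-byte suite type (1 for 802.1X, 2 for PSK, 8 for SAE). In sketch form:

static u8 demo_akm_suite_type(u32 akm_suite)
{
	/* e.g. WLAN_AKM_SUITE_SAE == 0x000fac08, so this returns 8 */
	return akm_suite & 0xff;
}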
*/ + return; + } + + memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); + memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); + + /* TODO: add support for collocated AP data */ + iwl_mei_host_associated(&conn_info, NULL); +#endif +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -3251,12 +3404,18 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * multicast data frames can be forwarded to the driver */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { + /* once we move into assoc state, need to update rate scale to + * disable using wide bandwidth + */ + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); if (!sta->tdls) { /* Multicast data frames are no longer allowed */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); @@ -3279,16 +3438,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { - /* remove session protection if still running */ + } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); - } ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_NONE) { ret = 0; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + iwl_mvm_stop_session_protection(mvm, vif); ret = iwl_mvm_rm_sta(mvm, vif, sta); if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); @@ -3454,12 +3613,15 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_sta *mvmsta = NULL; struct iwl_mvm_key_pn *ptk_pn; int keyidx = key->keyidx; int ret, i; u8 key_offset; + if (sta) + mvmsta = iwl_mvm_sta_from_mac80211(sta); + switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->trans_cfg->gen2) { @@ -3560,7 +3722,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, } if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - sta && iwl_mvm_has_new_rx_api(mvm) && + mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || @@ -3568,7 +3730,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, struct ieee80211_key_seq seq; int tid, q; - mvmsta = iwl_mvm_sta_from_mac80211(sta); WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, mvm->trans->num_rx_queues), @@ -3595,6 +3756,9 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, else key_offset = STA_KEY_IDX_INVALID; + if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + mvmsta->pairwise_cipher = key->cipher; + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { @@ -3635,12 +3799,11 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; } - if (sta && iwl_mvm_has_new_rx_api(mvm) && + if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & 
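The ptk_pn allocation above uses struct_size() from <linux/overflow.h>, the idiomatic, overflow-checked way to size a struct that ends in a flexible array member: it evaluates to sizeof(*ptk_pn) plus one per-queue element per RX queue. The same pattern with invented names:

struct demo_pn {
	int keyidx;
	struct {
		u8 pn[6];
	} q[];			/* one entry per RX queue */
};

static struct demo_pn *demo_alloc_pn(unsigned int num_rx_queues)
{
	struct demo_pn *p;

	/* sizeof(*p) + num_rx_queues * sizeof(p->q[0]), overflow-checked */
	p = kzalloc(struct_size(p, q, num_rx_queues), GFP_KERNEL);
	return p;
}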
IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { - mvmsta = iwl_mvm_sta_from_mac80211(sta); ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], lockdep_is_held(&mvm->mutex)); @@ -5360,6 +5523,10 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + return iwl_mvm_tx_csum_bz(mvm, head, true) == + iwl_mvm_tx_csum_bz(mvm, skb, true); + /* For now don't aggregate IPv6 in AMSDU */ if (skb->protocol != htons(ETH_P_IP)) return false; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2b1dcd60e00f..1dcbb0eb63c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -30,6 +30,7 @@ #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" +#include "mei/iwl-mei.h" #include "iwl-nvm-parse.h" #include <linux/average.h> @@ -93,11 +94,10 @@ struct iwl_mvm_phy_ctxt { enum nl80211_chan_width width; - /* - * TODO: This should probably be removed. Currently here only for rate - * scaling algorithm - */ struct ieee80211_channel *channel; + + /* track for RLC config command */ + u32 center_freq1; }; struct iwl_mvm_time_event_data { @@ -830,6 +830,18 @@ struct iwl_mvm { const char *nvm_file_name; struct iwl_nvm_data *nvm_data; + struct iwl_mei_nvm *mei_nvm_data; + struct iwl_mvm_csme_conn_info __rcu *csme_conn_info; + bool mei_rfkill_blocked; + bool mei_registered; + struct work_struct sap_connected_wk; + + /* + * NVM built based on the SAP data but that we can't free even after + * we get ownership because it contains the cfg80211's channel. 
+ */ + struct iwl_nvm_data *temp_nvm_data; + /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; @@ -1021,6 +1033,8 @@ struct iwl_mvm { /* Indicate if 32Khz external clock is valid */ u32 ext_clock_valid; + /* This vif used by CSME to send / receive traffic */ + struct ieee80211_vif *csme_vif; struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; u8 csa_tx_block_bcn_timeout; @@ -1123,6 +1137,10 @@ struct iwl_mvm { * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) + * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log + * if this is set, when intentionally triggered + * @IWL_MVM_STATUS_STARTING: starting mac, + * used to disable restart flow while in STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, @@ -1134,6 +1152,13 @@ enum iwl_mvm_status { IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, + IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, + IWL_MVM_STATUS_STARTING, +}; + +struct iwl_mvm_csme_conn_info { + struct rcu_head rcu_head; + struct iwl_mei_conn_info conn_info; }; /* Keep track of completed init configuration */ @@ -1493,6 +1518,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid); +u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); @@ -1598,8 +1624,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, @@ -1939,6 +1963,17 @@ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); +#if IS_ENABLED(CONFIG_IWLMEI) + +/* vendor commands */ +void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm); + +#else + +static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {} + +#endif + /* Location Aware Regulatory */ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, @@ -2158,4 +2193,47 @@ enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher) return IWL_LOCATION_CIPHER_INVALID; } } + +struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm); +static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + return iwl_mei_get_ownership(); + return 0; +} + +static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm, + struct sk_buff *skb, + unsigned int ivlen) +{ + if (mvm->mei_registered) + iwl_mei_tx_copy_to_csme(skb, ivlen); +} + +static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + iwl_mei_host_disassociated(); +} + +static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + iwl_mei_device_down(); +} + +static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm 
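The iwl_mvm_vendor_cmds_register() declaration above pairs a real prototype under IS_ENABLED(CONFIG_IWLMEI) with an empty static inline otherwise, so call sites need no #ifdefs whether the MEI code is built in, built as a module, or absent. The pattern in miniature, with an invented symbol:

#if IS_ENABLED(CONFIG_DEMO_FEATURE)
void demo_feature_register(struct iwl_mvm *mvm);
#else
static inline void demo_feature_register(struct iwl_mvm *mvm) {}
#endif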
*mvm) +{ + bool sw_rfkill = + mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false; + + if (mvm->mei_registered) + iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), + sw_rfkill); +} + +void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool forbidden); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 232ad531d612..87630d38dc52 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -5,6 +5,7 @@ * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/module.h> +#include <linux/rtnetlink.h> #include <linux/vmalloc.h> #include <net/mac80211.h> @@ -26,6 +27,7 @@ #include "time-event.h" #include "fw-api.h" #include "fw/acpi.h" +#include "fw/uefi.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); @@ -78,7 +80,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; - u32 reg_val = 0; + u32 reg_val; u32 phy_config = iwl_mvm_get_phy_config(mvm); radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> @@ -89,10 +91,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) FW_PHY_CFG_RADIO_DASH_POS; /* SKU control */ - reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; - reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; + reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); /* radio configuration */ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; @@ -117,8 +116,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | - CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | + CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH | CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | @@ -260,6 +258,7 @@ enum iwl_rx_handler_context { /** * struct iwl_rx_handlers handler for FW notification * @cmd_id: command id + * @min_size: minimum size to expect for the notification * @context: see &iwl_rx_handler_context * @fn: the function is called when notification is received */ @@ -334,9 +333,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_umac_scan_iter_complete_notif), - RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, - RX_HANDLER_SYNC, struct iwl_card_state_notif), - RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, RX_HANDLER_SYNC, struct iwl_missed_beacons_notif), @@ -457,7 +453,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(STATISTICS_NOTIFICATION), HCMD_NAME(EOSP_NOTIFICATION), HCMD_NAME(REDUCE_TX_POWER_CMD), - HCMD_NAME(CARD_STATE_NOTIFICATION), HCMD_NAME(MISSED_BEACONS_NOTIFICATION), HCMD_NAME(TDLS_CONFIG_CMD), HCMD_NAME(MAC_PM_POWER_TABLE), @@ -502,6 +497,9 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), + HCMD_NAME(RFI_CONFIG_CMD), + HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), + HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), }; /* Please keep this array *SORTED* by hex value. 
@@ -534,6 +532,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), + HCMD_NAME(RLC_CONFIG_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), HCMD_NAME(TLC_MNG_CONFIG_CMD), HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), @@ -683,13 +682,45 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) { + struct iwl_trans *trans = mvm->trans; int ret; + if (trans->csme_own) { + if (WARN(!mvm->mei_registered, + "csme is owner, but we aren't registered to iwlmei\n")) + goto get_nvm_from_fw; + + mvm->mei_nvm_data = iwl_mei_get_nvm(); + if (mvm->mei_nvm_data) { + /* + * mvm->mei_nvm_data is set and because of that, + * we'll load the NVM from the FW when we'll get + * ownership. + */ + mvm->nvm_data = + iwl_parse_mei_nvm_data(trans, trans->cfg, + mvm->mei_nvm_data, mvm->fw); + return 0; + } + + IWL_ERR(mvm, + "Got a NULL NVM from CSME, trying to get it from the device\n"); + } + +get_nvm_from_fw: rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); mutex_lock(&mvm->mutex); - ret = iwl_run_init_mvm_ucode(mvm); + ret = iwl_trans_start_hw(mvm->trans); + if (ret) { + mutex_unlock(&mvm->mutex); + wiphy_unlock(mvm->hw->wiphy); + rtnl_unlock(); + return ret; + } + ret = iwl_run_init_mvm_ucode(mvm); if (ret && ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!ret && iwl_mvm_is_lar_supported(mvm)) { @@ -701,9 +732,10 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); + wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); - if (ret < 0) + if (ret) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); return ret; @@ -711,6 +743,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) { + struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; int ret; iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); @@ -718,10 +751,17 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) ret = iwl_mvm_mac_setup_register(mvm); if (ret) return ret; + mvm->hw_registered = true; iwl_mvm_dbgfs_register(mvm); + wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, + mvm->mei_rfkill_blocked, + RFKILL_HARD_BLOCK_NOT_OWNER); + + iwl_mvm_mei_set_sw_rfkill_state(mvm); + return 0; } @@ -902,6 +942,109 @@ static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = { .frob_mem = iwl_mvm_frob_mem, }; +static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info) +{ + struct iwl_mvm *mvm = priv; + struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info; + + /* + * This is protected by the guarantee that this function will not be + * called twice on two different threads + */ + prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); + + curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL); + if (!curr_conn_info) + return; + + curr_conn_info->conn_info = *conn_info; + + rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); + + if (prev_conn_info) + kfree_rcu(prev_conn_info, rcu_head); +} + +static void iwl_mvm_mei_rfkill(void *priv, bool blocked) +{ + struct iwl_mvm *mvm = priv; + + mvm->mei_rfkill_blocked = blocked; + if (!mvm->hw_registered) + return; + + wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, + mvm->mei_rfkill_blocked, + RFKILL_HARD_BLOCK_NOT_OWNER); +} + +static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden) +{ + struct iwl_mvm *mvm = priv; + + if 
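iwl_mvm_me_conn_status() above is a textbook RCU single-pointer update: snapshot the new state into a fresh allocation, publish it with rcu_assign_pointer(), and free the previous snapshot only after a grace period via kfree_rcu(); concurrent writers are ruled out by the iwlmei callback contract, hence rcu_dereference_protected(..., true). Condensed into a sketch:

static void demo_publish_conn_info(struct iwl_mvm *mvm,
				   const struct iwl_mei_conn_info *info)
{
	struct iwl_mvm_csme_conn_info *prev, *curr;

	curr = kzalloc(sizeof(*curr), GFP_KERNEL);
	if (!curr)
		return;

	curr->conn_info = *info;

	prev = rcu_dereference_protected(mvm->csme_conn_info, true);
	rcu_assign_pointer(mvm->csme_conn_info, curr);
	if (prev)
		kfree_rcu(prev, rcu_head);
}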
(!mvm->hw_registered || !mvm->csme_vif) + return; + + iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); +} + +static void iwl_mvm_sap_connected_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, sap_connected_wk); + int ret; + + ret = iwl_mvm_start_get_nvm(mvm); + if (ret) + goto out_free; + + ret = iwl_mvm_start_post_nvm(mvm); + if (ret) + goto out_free; + + return; + +out_free: + IWL_ERR(mvm, "Couldn't get started...\n"); + iwl_mei_start_unregister(); + iwl_mei_unregister_complete(); + iwl_fw_flush_dumps(&mvm->fwrt); + iwl_mvm_thermal_exit(mvm); + iwl_fw_runtime_free(&mvm->fwrt); + iwl_phy_db_free(mvm->phy_db); + kfree(mvm->scan_cmd); + iwl_trans_op_mode_leave(mvm->trans); + kfree(mvm->nvm_data); + kfree(mvm->mei_nvm_data); + + ieee80211_free_hw(mvm->hw); +} + +static void iwl_mvm_mei_sap_connected(void *priv) +{ + struct iwl_mvm *mvm = priv; + + if (!mvm->hw_registered) + schedule_work(&mvm->sap_connected_wk); +} + +static void iwl_mvm_mei_nic_stolen(void *priv) +{ + struct iwl_mvm *mvm = priv; + + rtnl_lock(); + cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); + rtnl_unlock(); +} + +static const struct iwl_mei_ops mei_ops = { + .me_conn_status = iwl_mvm_me_conn_status, + .rfkill = iwl_mvm_mei_rfkill, + .roaming_forbidden = iwl_mvm_mei_roaming_forbidden, + .sap_connected = iwl_mvm_mei_sap_connected, + .nic_stolen = iwl_mvm_mei_nic_stolen, +}; + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -913,9 +1056,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, static const u8 no_reclaim_cmds[] = { TX_CMD, }; - int err, scan_size; + int scan_size; u32 min_backoff; - enum iwl_amsdu_size rb_size_default; + struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; /* * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station @@ -954,6 +1097,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, &iwl_mvm_sanitize_ops, mvm, dbgfs_dir); iwl_mvm_get_acpi_tables(mvm); + iwl_uefi_get_sgom_table(trans, &mvm->fwrt); mvm->init_status = 0; @@ -1015,6 +1159,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); + INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); @@ -1056,14 +1201,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - rb_size_default = IWL_AMSDU_2K; - else - rb_size_default = IWL_AMSDU_4K; - switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: - trans_cfg.rx_buf_size = rb_size_default; + trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; @@ -1077,7 +1217,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, default: pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size); - trans_cfg.rx_buf_size = rb_size_default; + trans_cfg.rx_buf_size = IWL_AMSDU_4K; } trans->wide_cmd_header = true; @@ -1137,10 
+1277,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file\n"); - err = iwl_trans_start_hw(mvm->trans); - if (err) - goto out_free; - scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); @@ -1165,8 +1301,20 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->debugfs_dir = dbgfs_dir; - if (iwl_mvm_start_get_nvm(mvm)) + mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); + + if (iwl_mvm_start_get_nvm(mvm)) { + /* + * Getting NVM failed while CSME is the owner, but we are + * registered to MEI, we'll get the NVM later when it'll be + * possible to get it from CSME. + */ + if (trans->csme_own && mvm->mei_registered) + return op_mode; + goto out_thermal_exit; + } + if (iwl_mvm_start_post_nvm(mvm)) goto out_thermal_exit; @@ -1175,6 +1323,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, out_thermal_exit: iwl_mvm_thermal_exit(mvm); + if (mvm->mei_registered) { + iwl_mei_start_unregister(); + iwl_mei_unregister_complete(); + } out_free: iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); @@ -1201,6 +1353,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm) iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); + iwl_mvm_mei_device_down(mvm); } static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) @@ -1208,11 +1361,33 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; + if (mvm->mei_registered) { + rtnl_lock(); + iwl_mei_set_netdev(NULL); + rtnl_unlock(); + iwl_mei_start_unregister(); + } + + /* + * After we unregister from mei, the worker can't be scheduled + * anymore. + */ + cancel_work_sync(&mvm->sap_connected_wk); + iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); - ieee80211_unregister_hw(mvm->hw); + /* + * If we couldn't get ownership on the device and we couldn't + * get the NVM from CSME, we haven't registered to mac80211. + * In that case, we didn't fail op_mode_start, because we are + * waiting for CSME to allow us to get the NVM to register to + * mac80211. If that didn't happen, we haven't registered to + * mac80211, hence the if below. 
+ */ + if (mvm->hw_registered) + ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); kfree(mvm->mcast_filter_cmd); @@ -1227,6 +1402,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) mvm->phy_db = NULL; kfree(mvm->nvm_data); + kfree(mvm->mei_nvm_data); + kfree(rcu_access_pointer(mvm->csme_conn_info)); + kfree(mvm->temp_nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); @@ -1235,6 +1413,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); + if (mvm->mei_registered) + iwl_mei_unregister_complete(); + ieee80211_free_hw(mvm->hw); } @@ -1517,6 +1698,12 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) iwl_mvm_set_rfkill_state(mvm); } +struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm) +{ + return rcu_dereference_protected(mvm->csme_conn_info, + lockdep_is_held(&mvm->mutex)); +} + static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -1600,6 +1787,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_fw_error_collect(&mvm->fwrt, false); + } else if (test_bit(IWL_MVM_STATUS_STARTING, + &mvm->status)) { + IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1652,9 +1842,16 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) iwl_fw_error_collect(&mvm->fwrt, false); - if (fw_error && mvm->fw_restart > 0) + if (fw_error && mvm->fw_restart > 0) { mvm->fw_restart--; - ieee80211_restart_hw(mvm->hw); + ieee80211_restart_hw(mvm->hw); + } else if (mvm->fwrt.trans->dbg.restart_required) { + IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n"); + mvm->fwrt.trans->dbg.restart_required = FALSE; + ieee80211_restart_hw(mvm->hw); + } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { + ieee80211_restart_hw(mvm->hw); + } } } @@ -1662,7 +1859,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) + if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && + !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, + &mvm->status)) iwl_mvm_dump_nic_error_log(mvm); if (sync) { @@ -1683,7 +1882,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) return; - iwl_mvm_nic_restart(mvm, true); + iwl_mvm_nic_restart(mvm, false); } static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) @@ -1732,6 +1931,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); + if (unlikely(queue >= mvm->trans->num_rx_queues)) + return; + if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 035336a9e755..9af40b0fa37a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -157,8 +157,43 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, /* Set the 
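The new early return in iwl_mvm_rx_mq_rss() above treats the firmware-supplied queue index as untrusted: it is later used to index per-queue state, so a value at or beyond num_rx_queues must be dropped rather than dereferenced. As a standalone predicate:

static bool demo_rx_queue_valid(struct iwl_mvm *mvm, unsigned int queue)
{
	/* queue comes from the device; never index past per-queue arrays */
	if (unlikely(queue >= mvm->trans->num_rx_queues))
		return false;

	return true;
}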
channel info data */ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); - iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info, + /* we only support RLC command version 2 */ + if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP, + RLC_CONFIG_CMD, 0) < 2) + iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info, + chains_static, chains_dynamic); +} + +static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt, + u8 chains_static, u8 chains_dynamic) +{ + struct iwl_rlc_config_cmd cmd = { + .phy_id = cpu_to_le32(ctxt->id), + }; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP, + RLC_CONFIG_CMD, 0) < 2) + return 0; + + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE != + PHY_RX_CHAIN_DRIVER_FORCE_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_VALID != + PHY_RX_CHAIN_VALID_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE != + PHY_RX_CHAIN_FORCE_SEL_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE_MIMO != + PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_COUNT != PHY_RX_CHAIN_CNT_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_MIMO_COUNT != + PHY_RX_CHAIN_MIMO_CNT_MSK); + + iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info, chains_static, chains_dynamic); + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD, + DATA_PATH_GROUP, 2), + 0, sizeof(cmd), &cmd); } /* @@ -177,7 +212,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, PHY_CONTEXT_CMD, 1); - if (ver == 3) { + if (ver == 3 || ver == 4) { struct iwl_phy_context_cmd cmd = {}; /* Set the command header fields */ @@ -211,9 +246,16 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, } - if (ret) + if (ret) { IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); - return ret; + return ret; + } + + if (action != FW_CTXT_ACTION_REMOVE) + return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static, + chains_dynamic); + + return 0; } /* @@ -228,6 +270,8 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, lockdep_assert_held(&mvm->mutex); ctxt->channel = chandef->chan; + ctxt->width = chandef->width; + ctxt->center_freq1 = chandef->center_freq1; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, @@ -257,6 +301,14 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, lockdep_assert_held(&mvm->mutex); + if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP, + RLC_CONFIG_CMD, 0) >= 2 && + ctxt->channel == chandef->chan && + ctxt->width == chandef->width && + ctxt->center_freq1 == chandef->center_freq1) + return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static, + chains_dynamic); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && ctxt->channel->band != chandef->chan->band) { @@ -275,6 +327,8 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->channel = chandef->chan; ctxt->width = chandef->width; + ctxt->center_freq1 = chandef->center_freq1; + return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, action); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index 44344216a1a9..f054ce76bed5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -7,39 +7,57 @@ #include "fw/api/commands.h" #include "fw/api/phy-ctxt.h" -/** +/* * DDR needs frequency in units of 16.666MHz, so provide FW with the * frequency values in the adjusted format. 
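The RFI comment above is worth unpacking: 16.666 MHz is 50/3 MHz, so the table entries that follow appear to be freq_mhz * 3 / 50, e.g. 3200 MHz -> 192, 4400 -> 264, 5200 -> 312 and 6000 -> 360 exactly; DDR clocks that are not multiples of the unit (such as 2933.33 MHz -> 176) land on the nearest unit. A sketch of the conversion (helper name invented, rounding behavior assumed):

static u16 demo_ddr_freq_to_rfi_units(u32 freq_mhz)
{
	/* 16.666... MHz == 50/3 MHz; multiply first to stay in integers */
	return (u16)DIV_ROUND_CLOSEST(freq_mhz * 3, 50);
}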
*/ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { - /* LPDDR4 */ + /* frequency 2667MHz */ + {cpu_to_le16(160), {50, 58, 60, 62, 64, 52, 54, 56}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 2933MHz */ + {cpu_to_le16(176), {149, 151, 153, 157, 159, 161, 165, 163, 167, 169, + 171, 173, 175}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 3200MHz */ + {cpu_to_le16(192), {79, 81, 83, 85, 87, 89, 91, 93}, + {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, + PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, /* frequency 3733MHz */ - {cpu_to_le16(223), {114, 116, 118, 120, 122,}, - {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + {cpu_to_le16(223), {114, 116, 118, 120, 122, 106, 110, 124, 126}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 4000MHz */ + {cpu_to_le16(240), {114, 151, 155, 157, 159, 161, 165}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5,}}, /* frequency 4267MHz */ {cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, - /* DDR5ePOR */ - - /* frequency 4000MHz */ - {cpu_to_le16(240), {3, 5, 7, 9, 11, 13, 15,}, - {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, - PHY_BAND_6, PHY_BAND_6,}}, - /* frequency 4400MHz */ {cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, - /* LPDDR5iPOR */ - /* frequency 5200MHz */ - {cpu_to_le16(312), {36, 38, 40, 42, 50,}, - {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + {cpu_to_le16(312), {36, 38, 40, 42, 44, 46, 50,}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 5600MHz */ + {cpu_to_le16(336), {106, 110, 112, 114, 116, 118, 120, 122}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 6000MHz */ {cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,}, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 958702403a45..66808c55aa0e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -129,7 +129,7 @@ int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, static void rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, const struct ieee80211_sta_vht_cap *vht_cap, - struct iwl_tlc_config_cmd *cmd) + struct iwl_tlc_config_cmd_v4 *cmd) { u16 supp; int i, highest_mcs; @@ -154,7 +154,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, if (sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); - cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp); + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp); /* * Check if VHT extended NSS indicates that the bandwidth/NSS * configuration is supported - only for MCS 0 since we already @@ -164,8 +164,8 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, ieee80211_get_vht_max_nss(&ieee_vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true, nss) >= nss) - 
cmd->ht_rates[i][IWL_TLC_HT_BW_160] = - cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160]; + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80]; } } @@ -189,7 +189,7 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) static void rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, - struct iwl_tlc_config_cmd *cmd) + struct iwl_tlc_config_cmd_v4 *cmd) { const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); @@ -219,7 +219,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, } if (_mcs_80 > _tx_mcs_80) _mcs_80 = _tx_mcs_80; - cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); /* If one side doesn't support - mark both as not supporting */ @@ -230,14 +230,14 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, } if (_mcs_160 > _tx_mcs_160) _mcs_160 = _tx_mcs_160; - cmd->ht_rates[i][IWL_TLC_HT_BW_160] = + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); } } static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, - struct iwl_tlc_config_cmd *cmd) + struct iwl_tlc_config_cmd_v4 *cmd) { int i; u16 supp = 0; @@ -263,15 +263,15 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; - cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); /* the station support only a single receive chain */ if (sta->smps_mode == IEEE80211_SMPS_STATIC) - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = 0; else - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } @@ -291,8 +291,12 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, notif = (void *)pkt->data; sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", - notif->sta_id); + /* can happen in remove station flow where mvm removed internally + * the station before removing from FW + */ + IWL_DEBUG_RATE(mvm, + "Invalid mvm RCU pointer for sta id (%d) in TLC notification\n", + notif->sta_id); goto out; } @@ -311,18 +315,19 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, if (flags & IWL_TLC_NOTIF_FLAG_RATE) { char pretty_rate[100]; - if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, - TLC_MNG_UPDATE_NOTIF, 0) < 3) { - rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate), - le32_to_cpu(notif->rate)); - IWL_DEBUG_RATE(mvm, - "Got rate in old format. Rate: %s. Converting.\n", - pretty_rate); - lq_sta->last_rate_n_flags = - iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); - } else { - lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); - } + if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + TLC_MNG_UPDATE_NOTIF, 0) < 3) { + rs_pretty_print_rate_v1(pretty_rate, + sizeof(pretty_rate), + le32_to_cpu(notif->rate)); + IWL_DEBUG_RATE(mvm, + "Got rate in old format. Rate: %s. 
Converting.\n", + pretty_rate); + lq_sta->last_rate_n_flags = + iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); + } else { + lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); + } rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), lq_sta->last_rate_n_flags); IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate); @@ -418,23 +423,18 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0); struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta); - struct iwl_tlc_config_cmd cfg_cmd = { + struct iwl_tlc_config_cmd_v4 cfg_cmd = { .sta_id = mvmsta->sta_id, .max_ch_width = update ? rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), - .max_mpdu_len = cpu_to_le16(max_amsdu_len), - .amsdu = iwl_mvm_is_csum_supported(mvm), + .max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ? + cpu_to_le16(max_amsdu_len) : 0, }; int ret; - u16 cmd_size = sizeof(cfg_cmd); - - /* In old versions of the API the struct is 4 bytes smaller */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP, - TLC_MNG_CONFIG_CMD, 0) < 3) - cmd_size -= 4; + int cmd_ver; memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); @@ -449,8 +449,41 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ sta->max_amsdu_len = max_amsdu_len; - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, - &cfg_cmd); + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP, + TLC_MNG_CONFIG_CMD, 0); + if (cmd_ver == 4) { + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, + sizeof(cfg_cmd), &cfg_cmd); + } else if (cmd_ver < 4) { + struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = { + .sta_id = cfg_cmd.sta_id, + .max_ch_width = cfg_cmd.max_ch_width, + .mode = cfg_cmd.mode, + .chains = cfg_cmd.chains, + .amsdu = !!cfg_cmd.max_mpdu_len, + .flags = cfg_cmd.flags, + .non_ht_rates = cfg_cmd.non_ht_rates, + .ht_rates[0][0] = cfg_cmd.ht_rates[0][0], + .ht_rates[0][1] = cfg_cmd.ht_rates[0][1], + .ht_rates[1][0] = cfg_cmd.ht_rates[1][0], + .ht_rates[1][1] = cfg_cmd.ht_rates[1][1], + .sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp, + .max_mpdu_len = cfg_cmd.max_mpdu_len, + }; + + u16 cmd_size = sizeof(cfg_cmd_v3); + + /* In old versions of the API the struct is 4 bytes smaller */ + if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP, + TLC_MNG_CONFIG_CMD, 0) < 3) + cmd_size -= 4; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, + &cfg_cmd_v3); + } else { + ret = -EINVAL; + } + if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index d22f40a5354d..64446a11ef98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -527,40 +527,19 @@ struct iwl_mvm_stat_data { u8 *beacon_average_energy; }; -static void iwl_mvm_stat_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +struct iwl_mvm_stat_data_all_macs { + struct iwl_mvm *mvm; + __le32 flags; + struct iwl_statistics_ntfy_per_mac *per_mac_stats; +}; + +static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) { - struct iwl_mvm_stat_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - int sig = -data->beacon_filter_average_energy; - int last_event; + struct iwl_mvm_vif *mvmvif 
= iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; int thold = vif->bss_conf.cqm_rssi_thold; int hyst = vif->bss_conf.cqm_rssi_hyst; - u16 id = le32_to_cpu(data->mac_id); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u16 vif_id = mvmvif->id; - - /* This doesn't need the MAC ID check since it's not taking the - * data copied into the "data" struct, but rather the data from - * the notification directly. - */ - mvmvif->beacon_stats.num_beacons = - le32_to_cpu(data->beacon_counter[vif_id]); - mvmvif->beacon_stats.avg_signal = - -data->beacon_average_energy[vif_id]; - - /* make sure that beacon statistics don't go backwards with TCM - * request to clear statistics - */ - if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; - - if (mvmvif->id != id) - return; - - if (vif->type != NL80211_IFTYPE_STATION) - return; + int last_event; if (sig == 0) { IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); @@ -618,6 +597,73 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, } } +static void iwl_mvm_stat_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_stat_data *data = _data; + int sig = -data->beacon_filter_average_energy; + u16 id = le32_to_cpu(data->mac_id); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 vif_id = mvmvif->id; + + /* This doesn't need the MAC ID check since it's not taking the + * data copied into the "data" struct, but rather the data from + * the notification directly. + */ + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(data->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -data->beacon_average_energy[vif_id]; + + if (mvmvif->id != id) + return; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + + iwl_mvm_update_vif_sig(vif, sig); +} + +static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_stat_data_all_macs *data = _data; + struct iwl_statistics_ntfy_per_mac *mac_stats; + int sig; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 vif_id = mvmvif->id; + + if (WARN_ONCE(vif_id > MAC_INDEX_AUX, "invalid vif id: %d", vif_id)) + return; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + mac_stats = &data->per_mac_stats[vif_id]; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(mac_stats->beacon_counter); + mvmvif->beacon_stats.avg_signal = + -le32_to_cpu(mac_stats->beacon_average_energy); + + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + + sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy); + iwl_mvm_update_vif_sig(vif, sig); +} + static inline void iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -684,47 +730,41 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le, } static void -iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) +iwl_mvm_stats_ver_15(struct iwl_mvm *mvm, + struct iwl_statistics_operational_ntfy *stats) +{ + struct 
iwl_mvm_stat_data_all_macs data = { + .mvm = mvm, + .flags = stats->flags, + .per_mac_stats = stats->per_mac_stats, + }; + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_stat_iterator_all_macs, + &data); +} + +static void +iwl_mvm_stats_ver_14(struct iwl_mvm *mvm, + struct iwl_statistics_operational_ntfy_ver_14 *stats) { struct iwl_mvm_stat_data data = { .mvm = mvm, }; + u8 beacon_average_energy[MAC_INDEX_AUX]; - u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; - struct iwl_statistics_operational_ntfy *stats; - int expected_size; __le32 flags; int i; - expected_size = sizeof(*stats); - if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size, - "received invalid statistics size (%d)!, expected_size: %d\n", - iwl_rx_packet_payload_len(pkt), expected_size)) - return; - - stats = (void *)&pkt->data; - - if (WARN_ONCE(stats->hdr.type != FW_STATISTICS_OPERATIONAL || - stats->hdr.version != - iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, STATISTICS_CMD, 0), - "received unsupported hdr type %d, version %d\n", - stats->hdr.type, stats->hdr.version)) - return; - flags = stats->flags; - mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); - mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); - mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); - mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan); - - iwl_mvm_rx_stats_check_trigger(mvm, pkt); data.mac_id = stats->mac_id; data.beacon_filter_average_energy = le32_to_cpu(stats->beacon_filter_average_energy); data.flags = flags; data.beacon_counter = stats->beacon_counter; + for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++) beacon_average_energy[i] = le32_to_cpu(stats->beacon_average_energy[i]); @@ -735,9 +775,105 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); +} + +static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt, + u32 expected_size) +{ + struct iwl_statistics_ntfy_hdr *hdr; + + if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size, + "received invalid statistics size (%d)!, expected_size: %d\n", + iwl_rx_packet_payload_len(pkt), expected_size)) + return false; + + hdr = (void *)&pkt->data; + + if (WARN_ONCE((hdr->type & IWL_STATISTICS_TYPE_MSK) != FW_STATISTICS_OPERATIONAL || + hdr->version != + iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0), + "received unsupported hdr type %d, version %d\n", + hdr->type, hdr->version)) + return false; + + if (WARN_ONCE(le16_to_cpu(hdr->size) != expected_size, + "received invalid statistics size in header (%d)!, expected_size: %d\n", + le16_to_cpu(hdr->size), expected_size)) + return false; + + return true; +} + +static void +iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + __le32 air_time[MAC_INDEX_AUX]; + __le32 rx_bytes[MAC_INDEX_AUX]; + __le32 flags = 0; + int i; + u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + STATISTICS_NOTIFICATION, 0); + + if (WARN_ONCE(notif_ver > 15, + "invalid statistics version id: %d\n", notif_ver)) + return; + + if (notif_ver == 14) { + struct iwl_statistics_operational_ntfy_ver_14 *stats = + (void *)pkt->data; + + if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats))) + return; + + iwl_mvm_stats_ver_14(mvm, stats); + + flags = stats->flags; + mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); + 
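(A note on the shape of iwl_mvm_handle_rx_statistics_tlv() in this hunk: the payload layout is selected by the notification version, 14 or 15, and each branch validates the packet length against the expected struct before touching any field. A self-contained sketch of that validate-then-dispatch pattern; the struct names and sizes are invented stand-ins, not the firmware API:)

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical v14/v15 layouts sharing a common header. */
    struct hdr       { uint8_t version; uint8_t type; uint16_t size; };
    struct stats_v14 { struct hdr hdr; uint32_t rx_time; };
    struct stats_v15 { struct hdr hdr; uint32_t rx_time; uint32_t per_mac[4]; };

    static int parse_stats(const void *buf, size_t len)
    {
        struct hdr h;

        if (len < sizeof(h))
            return -1;
        memcpy(&h, buf, sizeof(h));

        /* Derive the expected size from the version, verify, then parse. */
        switch (h.version) {
        case 14:
            return len < sizeof(struct stats_v14) ? -1 : (puts("v14 layout"), 0);
        case 15:
            return len < sizeof(struct stats_v15) ? -1 : (puts("v15 layout"), 0);
        default:
            return -1; /* unknown version: refuse rather than guess */
        }
    }

    int main(void)
    {
        struct stats_v15 s = { .hdr = { .version = 15 } };

        return parse_stats(&s, sizeof(s)) ? 1 : 0;
    }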
mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); + mvm->radio_stats.on_time_scan = + le64_to_cpu(stats->on_time_scan); + + for (i = 0; i < ARRAY_SIZE(average_energy); i++) + average_energy[i] = le32_to_cpu(stats->average_energy[i]); + + for (i = 0; i < ARRAY_SIZE(air_time); i++) { + air_time[i] = stats->air_time[i]; + rx_bytes[i] = stats->rx_bytes[i]; + } + } + + if (notif_ver == 15) { + struct iwl_statistics_operational_ntfy *stats = + (void *)pkt->data; + + if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats))) + return; + + iwl_mvm_stats_ver_15(mvm, stats); + + flags = stats->flags; + mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); + mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); + mvm->radio_stats.on_time_scan = + le64_to_cpu(stats->on_time_scan); + + for (i = 0; i < ARRAY_SIZE(average_energy); i++) + average_energy[i] = + le32_to_cpu(stats->per_sta_stats[i].average_energy); + + for (i = 0; i < ARRAY_SIZE(air_time); i++) { + air_time[i] = stats->per_mac_stats[i].air_time; + rx_bytes[i] = stats->per_mac_stats[i].rx_bytes; + } + } + + iwl_mvm_rx_stats_check_trigger(mvm, pkt); - for (i = 0; i < ARRAY_SIZE(average_energy); i++) - average_energy[i] = le32_to_cpu(stats->average_energy[i]); ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, average_energy); /* @@ -746,8 +882,7 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, * request and once in statistics notification. */ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - iwl_mvm_update_tcm_from_stats(mvm, stats->air_time, - stats->rx_bytes); + iwl_mvm_update_tcm_from_stats(mvm, air_time, rx_bytes); } void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, @@ -761,8 +896,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, u8 *energy; /* From ver 14 and up we use TLV statistics format */ - if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, - STATISTICS_CMD, 0) >= 14) + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + STATISTICS_NOTIFICATION, 0) >= 14) return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index e0601f802628..295629c5c035 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -121,12 +121,39 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + u8 mic_crc_len = u8_get_bits(desc->mac_flags1, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1; if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { len -= 2; pad_len = 2; } + /* + * For non monitor interface strip the bytes the RADA might not have + * removed. As monitor interface cannot exist with other interfaces + * this removal is safe. + */ + if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) { + u32 pkt_flags = le32_to_cpu(pkt->len_n_flags); + + /* + * If RADA was not enabled then decryption was not performed so + * the MIC cannot be removed. + */ + if (!(pkt_flags & FH_RSCSR_RADA_EN)) { + if (WARN_ON(crypt_len > mic_crc_len)) + return -EINVAL; + + mic_crc_len -= crypt_len; + } + + if (WARN_ON(mic_crc_len > len)) + return -EINVAL; + + len -= mic_crc_len; + } + /* If frame is small enough to fit in skb->head, pull it completely. 
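(On the iwl_mvm_create_skb() hunk above: the trailing MIC/CRC byte count comes from a packed field in desc->mac_flags1, extracted by mask and then doubled because the field counts 2-byte units. A userspace sketch of that extraction; the mask value and names are made up, and get_bits8() mimics the kernel's u8_get_bits() using a GCC/Clang builtin:)

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 4-bit length field at bits 0..3, in 2-byte units. */
    #define MIC_CRC_LEN_MASK 0x0f

    static uint8_t get_bits8(uint8_t val, uint8_t mask)
    {
        /* Shift the masked field down to bit 0, like u8_get_bits(). */
        return (val & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
        uint8_t mac_flags1 = 0x24;  /* field value 4 -> 8 trailing bytes */
        uint8_t mic_crc_len = get_bits8(mac_flags1, MIC_CRC_LEN_MASK) << 1;

        printf("strip %u trailing bytes\n", mic_crc_len);
        return 0;
    }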
* If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that @@ -149,18 +176,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, */ hdrlen += crypt_len; - if (WARN_ONCE(headlen < hdrlen, - "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", - hdrlen, len, crypt_len)) { - /* - * We warn and trace because we want to be able to see - * it in trace-cmd as well. - */ - IWL_DEBUG_RX(mvm, - "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", - hdrlen, len, crypt_len); + if (unlikely(headlen < hdrlen)) return -EINVAL; - } skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); @@ -172,8 +189,12 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, * in the cases the hardware didn't handle, since it's rare to see * such packets, even though the hardware did calculate the checksum * in this case, just starting after the MAC header instead. + * + * Starting from Bz hardware, it calculates starting directly after + * the MAC header, so that matches mac80211's expectation. */ - if (skb->ip_summed == CHECKSUM_COMPLETE) { + if (skb->ip_summed == CHECKSUM_COMPLETE && + mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) { struct { u8 hdr[6]; __be16 type; @@ -766,8 +787,11 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, rcu_read_lock(); ba_data = rcu_dereference(mvm->baid_map[baid]); - if (WARN_ON_ONCE(!ba_data)) + if (!ba_data) { + WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC), + "BAID %d not found in map\n", baid); goto out; + } sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) @@ -1961,8 +1985,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } else if (format == RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_MCS_NSS_MSK) >> + rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_VHT; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a138b5c4cce8..5f92a09db374 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -579,7 +579,9 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, profile->ssid_index = i; /* Support any cipher and auth algorithm */ profile->unicast_cipher = 0xff; - profile->auth_alg = 0xff; + profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED | + IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X | + IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE; profile->network_type = IWL_NETWORK_TYPE_ANY; profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN; @@ -1394,8 +1396,8 @@ static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params) } static void -iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm, - struct iwl_scan_general_params_v10 *general_params, +iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm, + struct iwl_scan_general_params_v11 *general_params, struct iwl_mvm_scan_params *params) { struct iwl_mvm_scan_timing_params *timing, *hb_timing; @@ -1826,8 +1828,6 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, } } - flags = bssid_bitmap | (s_ssid_bitmap << 16); - if 
(cfg80211_channel_is_psc(params->channels[i]) && psc_no_listen) flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN; @@ -1869,8 +1869,11 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, (s_max > 1 || b_max > 3)); } if ((allow_passive && force_passive) || - (!flags && !cfg80211_channel_is_psc(params->channels[i]))) + (!(bssid_bitmap | s_ssid_bitmap) && + !cfg80211_channel_is_psc(params->channels[i]))) flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE; + else + flags |= bssid_bitmap | (s_ssid_bitmap << 16); channel_cfg[i].flags |= cpu_to_le32(flags); } @@ -1924,22 +1927,19 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm, } /* - * 6GHz passive scan is allowed while associated in a defined time - * interval following HW reset or resume flow + * 6GHz passive scan is allowed in a defined time interval following HW + * reset or resume flow, or while not associated and a large interval + * has passed since the last 6GHz passive scan. */ - if (vif->bss_conf.assoc && + if ((vif->bss_conf.assoc || + time_after(mvm->last_6ghz_passive_scan_jiffies + + (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) && (time_before(mvm->last_reset_or_resume_time_jiffies + (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ), jiffies))) { - IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n"); - return; - } - - /* No need for 6GHz passive scan if not enough time elapsed */ - if (time_after(mvm->last_6ghz_passive_scan_jiffies + - (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) { - IWL_DEBUG_SCAN(mvm, - "6GHz passive scan: timeout did not expire\n"); + IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n", + vif->bss_conf.assoc ? "associated" : + "timeout did not expire"); return; } @@ -2037,6 +2037,12 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, if (params->enable_6ghz_passive) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN; + if (iwl_mvm_is_oce_supported(mvm) && + (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP | + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE | + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE; + return flags; } @@ -2238,15 +2244,15 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } static void -iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm, +iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, - struct iwl_scan_general_params_v10 *gp, + struct iwl_scan_general_params_v11 *gp, u16 gen_flags) { struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); - iwl_mvm_scan_umac_dwell_v10(mvm, gp, params); + iwl_mvm_scan_umac_dwell_v11(mvm, gp, params); gp->flags = cpu_to_le16(gen_flags); @@ -2350,7 +2356,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); - iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif, + iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, &scan_p->general_params, gen_flags); @@ -2367,12 +2373,13 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } -static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params, int type, - int uid) +static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, + int type, int uid, u32 version) { - struct iwl_scan_req_umac_v14 *cmd = 
mvm->scan_cmd; - struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params; + struct iwl_scan_req_umac_v15 *cmd = mvm->scan_cmd; + struct iwl_scan_req_params_v15 *scan_p = &cmd->scan_params; struct iwl_scan_channel_params_v6 *cp = &scan_p->channel_params; struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params; int ret; @@ -2385,7 +2392,7 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); - iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif, + iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, &scan_p->general_params, gen_flags); @@ -2425,6 +2432,20 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } +static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14); +} + +static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15); +} + static int iwl_mvm_num_scans(struct iwl_mvm *mvm) { return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); @@ -2498,7 +2519,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EIO; } -#define SCAN_TIMEOUT 20000 +#define SCAN_TIMEOUT 30000 void iwl_mvm_scan_timeout_wk(struct work_struct *work) { @@ -2540,6 +2561,7 @@ struct iwl_scan_umac_handler { static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = { /* set the newest version first to shorten the list traverse time */ + IWL_SCAN_UMAC_HANDLER(15), IWL_SCAN_UMAC_HANDLER(14), IWL_SCAN_UMAC_HANDLER(12), }; @@ -2940,15 +2962,14 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) 1 * HZ); } -#define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \ - case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \ -} - static int iwl_scan_req_umac_get_size(u8 scan_ver) { switch (scan_ver) { - IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14); - IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12); + case 12: + return sizeof(struct iwl_scan_req_umac_v12); + case 14: + case 15: + return sizeof(struct iwl_scan_req_umac_v15); } return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index a64874c05ced..feab0bfcd7a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2684,6 +2684,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); + + /* + * After we've deleted it, do another queue sync + * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently + * running it won't find a new session in the old + * BAID. It can find the NULL pointer for the BAID, + * but we must not have it find a different session. 
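(Expanding on the comment above: the extra sync enforces ordering, i.e. the BAID entry is unpublished first, and only after every reader still in flight has drained may the slot be reused. A toy single-threaded analogue with C11 atomics; the driver itself relies on RCU plus the per-queue sync rather than a spin counter, and all names here are illustrative:)

    #include <stdatomic.h>
    #include <stdio.h>

    struct session { int id; };

    static struct session s42 = { 42 };
    static _Atomic(struct session *) baid_map[8] = { [3] = &s42 };
    static atomic_int readers_in_flight;

    static void rx_path(int baid)  /* stands in for the RX queues */
    {
        atomic_fetch_add(&readers_in_flight, 1);
        struct session *s = atomic_load(&baid_map[baid]);

        if (s)
            printf("rx: session %d\n", s->id);
        else
            printf("rx: baid %d empty\n", baid);
        atomic_fetch_sub(&readers_in_flight, 1);
    }

    static void remove_session(int baid)
    {
        /* Unpublish first, then drain: no reader may still hold the old
         * session once the slot is considered free for reuse. */
        atomic_store(&baid_map[baid], NULL);
        while (atomic_load(&readers_in_flight))
            ;
        printf("baid %d free for reuse\n", baid);
    }

    int main(void)
    {
        rx_path(3);
        remove_session(3);
        rx_path(3);  /* sees NULL, never a different session */
        return 0;
    }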
+ */ + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, + true, NULL, 0); } return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 32b4d1935788..e34b82b2a288 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -373,6 +373,7 @@ struct iwl_mvm_rxq_dup_data { * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). + * @pairwise_cipher: used to feed iwlmei upon authorization * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -415,6 +416,7 @@ struct iwl_mvm_sta { u8 sleep_tx_count; u8 avg_energy; u8 tx_ant; + u32 pairwise_cipher; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index e91f8e889df7..ab06dcda1462 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -49,14 +49,13 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); /* - * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. + * Clear the ROC_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); - clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); synchronize_net(); @@ -82,9 +81,19 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); } - } else { + } + + /* + * Clear the ROC_AUX_RUNNING status bit. + * This will cause the TX path to drop offchannel transmissions. + * That would also be done by mac80211, but it is racy, in particular + * in the case that the time event actually completed in the firmware + * (which is handled in iwl_mvm_te_handle_notif). + */ + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { /* do the same in case of hot spot 2.0 */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + /* In newer version of this command an aux station is added only * in cases of dedicated tx queue and need to be removed in end * of use */ @@ -687,11 +696,14 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); - /* When session protection is supported, the te_data->id field + /* When session protection is used, the te_data->id field * is reused to save session protection's configuration. + * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set + * to HOT_SPOT_CMD. 
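(On the roc_done_wk() change earlier in this hunk: turning clear_bit() into test_and_clear_bit() folds the check and the clear into one atomic step, so the AUX-station flush runs at most once even if the bit is raced on by concurrent paths. A minimal userspace analogue with C11 atomics; the bit value and names are illustrative:)

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define ROC_AUX_RUNNING (1u << 1)

    static atomic_uint status = ROC_AUX_RUNNING;

    /* Atomically clear the bit and report whether it had been set,
     * like the kernel's test_and_clear_bit(). */
    static bool test_and_clear(atomic_uint *word, unsigned int bit)
    {
        return atomic_fetch_and(word, ~bit) & bit;
    }

    int main(void)
    {
        if (test_and_clear(&status, ROC_AUX_RUNNING))
            puts("first pass: flush the aux station");
        if (!test_and_clear(&status, ROC_AUX_RUNNING))
            puts("second pass: bit already clear, nothing to do");
        return 0;
    }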
*/ if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) && + id != HOT_SPOT_CMD) { if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { /* Session protection is still ongoing. Cancel it */ iwl_mvm_cancel_session_protection(mvm, mvmvif, id); @@ -1027,7 +1039,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, - &mvmvif->time_event_data); + &mvmvif->hs_time_event_data); iwl_mvm_roc_finished(mvm); } @@ -1158,15 +1170,10 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; - /* The time_event_data.id field is reused to save session - * protection's configuration. - */ - mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC; - cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); - lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); @@ -1180,6 +1187,11 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, } iwl_mvm_te_clear_data(mvm, te_data); + /* + * The time_event_data.id field is reused to save session + * protection's configuration. + */ + te_data->id = le32_to_cpu(cmd.conf_id); te_data->duration = le32_to_cpu(cmd.duration_tu); te_data->vif = vif; spin_unlock_bh(&mvm->time_event_lock); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index bdd4ee432548..6fa2c12f7955 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -39,11 +39,11 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) -static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_hdr *hdr, - struct ieee80211_tx_info *info, - u16 offload_assist) +static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, bool amsdu) { + struct ieee80211_hdr *hdr = (void *)skb->data; + u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u8 protocol = 0; @@ -106,8 +106,7 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ - if (skb->protocol == htons(ETH_P_IP) && - (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) { + if (skb->protocol == htons(ETH_P_IP) && amsdu) { ip_hdr(skb)->check = 0; offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); } @@ -132,9 +131,63 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, out: #endif + if (amsdu) + offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); + else if (ieee80211_hdrlen(hdr->frame_control) % 4) + /* padding is inserted later in transport */ + offload_assist |= BIT(TX_CMD_OFFLD_PAD); + + return offload_assist; +} + +u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + u32 
offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + unsigned int csum_start = skb_checksum_start_offset(skb); + + offload_assist |= u32_encode_bits(hdrlen / 2, + IWL_TX_CMD_OFFLD_BZ_MH_LEN); + if (amsdu) + offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU; + else if (hdrlen % 4) + /* padding is inserted later in transport */ + offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return offload_assist; + + offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM | + IWL_TX_CMD_OFFLD_BZ_ZERO2ONES; + + /* + * mac80211 will always calculate checksum in software for + * non-fast-xmit, and so we can only do offloaded checksum + * for fast-xmit frames. In this case, we always have the + * RFC 1042 header present. skb_checksum_start_offset() + * returns the offset from the beginning, but the hardware + * needs it from after the header & SNAP header. + */ + csum_start -= hdrlen + 8; + + offload_assist |= u32_encode_bits(csum_start, + IWL_TX_CMD_OFFLD_BZ_START_OFFS); + offload_assist |= u32_encode_bits(csum_start + skb->csum_offset, + IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS); + return offload_assist; } +static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, + bool amsdu) +{ + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); + return iwl_mvm_tx_csum_bz(mvm, skb, amsdu); +} + /* * Sets most of the Tx cmd's fields */ @@ -146,7 +199,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); u32 len = skb->len + FCS_LEN; - u16 offload_assist = 0; + bool amsdu = false; u8 ac; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || @@ -166,8 +219,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; - if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) - offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); + amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); @@ -234,14 +286,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; - /* padding is inserted later in transport */ - if (ieee80211_hdrlen(fc) % 4 && - !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) - offload_assist |= BIT(TX_CMD_OFFLD_PAD); - - tx_cmd->offload_assist |= - cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info, - offload_assist)); + tx_cmd->offload_assist = + cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu)); } static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, @@ -269,17 +315,18 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, u8 rate_plcp; u32 rate_flags = 0; bool is_cck; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); /* info->control is only relevant for non HW rate control */ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && !ieee80211_is_data(fc), "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n", info->control.rates[0].flags, info->control.rates[0].idx, - le16_to_cpu(fc), mvmsta->sta_state); + 
le16_to_cpu(fc), sta ? mvmsta->sta_state : -1); rate_idx = info->control.rates[0].idx; } @@ -462,27 +509,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { - u16 offload_assist = 0; u32 rate_n_flags = 0; u16 flags = 0; struct iwl_mvm_sta *mvmsta = sta ? iwl_mvm_sta_from_mac80211(sta) : NULL; + bool amsdu = false; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); - if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) - offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); + amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } - offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info, - offload_assist); - - /* padding is inserted later in transport */ - if (ieee80211_hdrlen(hdr->frame_control) % 4 && - !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) - offload_assist |= BIT(TX_CMD_OFFLD_PAD); - if (!info->control.hw_key) flags |= IWL_TX_FLAGS_ENCRYPT_DIS; @@ -502,8 +540,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; + u32 offload_assist = iwl_mvm_tx_csum(mvm, skb, + info, amsdu); - cmd->offload_assist |= cpu_to_le32(offload_assist); + cmd->offload_assist = cpu_to_le32(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); @@ -515,8 +555,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } else { struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; + u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb, + info, + amsdu); - cmd->offload_assist |= cpu_to_le16(offload_assist); + cmd->offload_assist = cpu_to_le16(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); @@ -1128,6 +1171,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); + if (ieee80211_is_data(fc)) + iwl_mvm_mei_tx_copy_to_csme(mvm, skb, + info->control.hw_key ? + info->control.hw_key->iv_len : 0); + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index caf1dcf48888..1f3e90e5dbd4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -340,25 +340,64 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ieee80211_request_smps(vif, smps_mode); } +static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION); + + return true; +} + int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) { struct iwl_statistics_cmd scmd = { .flags = clear ? 
cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, }; + struct iwl_host_cmd cmd = { .id = STATISTICS_CMD, .len[0] = sizeof(scmd), .data[0] = &scmd, - .flags = CMD_WANT_SKB, }; int ret; - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) - return ret; + /* From version 15 - STATISTICS_NOTIFICATION, the reply for + * STATISTICS_CMD is empty, and the response is with + * STATISTICS_NOTIFICATION notification + */ + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + STATISTICS_NOTIFICATION, 0) < 15) { + cmd.flags = CMD_WANT_SKB; - iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); - iwl_free_resp(&cmd); + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); + iwl_free_resp(&cmd); + } else { + struct iwl_notification_wait stats_wait; + static const u16 stats_complete[] = { + STATISTICS_NOTIFICATION, + }; + + iwl_init_notification_wait(&mvm->notif_wait, &stats_wait, + stats_complete, ARRAY_SIZE(stats_complete), + iwl_wait_stats_complete, NULL); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + iwl_remove_notification(&mvm->notif_wait, &stats_wait); + return ret; + } + + /* 200ms should be enough for FW to collect data from all + * LMACs and send STATISTICS_NOTIFICATION to host + */ + ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5); + if (ret) + return ret; + } if (clear) iwl_mvm_accu_radio_stats(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c new file mode 100644 index 000000000000..78450366312b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ +#include "mvm.h" +#include <linux/nl80211-vnd-intel.h> +#include <net/netlink.h> + +static const struct nla_policy +iwl_mvm_vendor_attr_policy[NUM_IWL_MVM_VENDOR_ATTR] = { + [IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_AUTH_MODE] = { .type = NLA_U32 }, + [IWL_MVM_VENDOR_ATTR_CHANNEL_NUM] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_SSID] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_SSID_LEN }, + [IWL_MVM_VENDOR_ATTR_BAND] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_COLLOC_CHANNEL] = { .type = NLA_U8 }, + [IWL_MVM_VENDOR_ATTR_COLLOC_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN }, +}; + +static int iwl_mvm_vendor_get_csme_conn_info(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_csme_conn_info *csme_conn_info; + struct sk_buff *skb; + int err = 0; + + mutex_lock(&mvm->mutex); + csme_conn_info = iwl_mvm_get_csme_conn_info(mvm); + + if (!csme_conn_info) { + err = -EINVAL; + goto out_unlock; + } + + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 200); + if (!skb) { + err = -ENOMEM; + goto out_unlock; + } + + if (nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_AUTH_MODE, + csme_conn_info->conn_info.auth_mode) || + nla_put(skb, IWL_MVM_VENDOR_ATTR_SSID, + csme_conn_info->conn_info.ssid_len, + csme_conn_info->conn_info.ssid) || + nla_put_u32(skb, IWL_MVM_VENDOR_ATTR_STA_CIPHER, + csme_conn_info->conn_info.pairwise_cipher) || + nla_put_u8(skb, IWL_MVM_VENDOR_ATTR_CHANNEL_NUM, + csme_conn_info->conn_info.channel) || + nla_put(skb, IWL_MVM_VENDOR_ATTR_ADDR, ETH_ALEN, + csme_conn_info->conn_info.bssid)) { + kfree_skb(skb); + err = -ENOBUFS; + } + +out_unlock: + mutex_unlock(&mvm->mutex); + if 
(err) + return err; + + return cfg80211_vendor_cmd_reply(skb); +} + +static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + iwl_mvm_mei_get_ownership(mvm); + mutex_unlock(&mvm->mutex); + + return 0; +} + +static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = { + { + .info = { + .vendor_id = INTEL_OUI, + .subcmd = IWL_MVM_VENDOR_CMD_GET_CSME_CONN_INFO, + }, + .doit = iwl_mvm_vendor_get_csme_conn_info, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV, + .policy = iwl_mvm_vendor_attr_policy, + .maxattr = MAX_IWL_MVM_VENDOR_ATTR, + }, + { + .info = { + .vendor_id = INTEL_OUI, + .subcmd = IWL_MVM_VENDOR_CMD_HOST_GET_OWNERSHIP, + }, + .doit = iwl_mvm_vendor_host_get_ownership, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV, + .policy = iwl_mvm_vendor_attr_policy, + .maxattr = MAX_IWL_MVM_VENDOR_ATTR, + }, +}; + +enum iwl_mvm_vendor_events_idx { + /* 0x0 - 0x3 are deprecated */ + IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4, + NUM_IWL_MVM_VENDOR_EVENT_IDX +}; + +static const struct nl80211_vendor_cmd_info +iwl_mvm_vendor_events[NUM_IWL_MVM_VENDOR_EVENT_IDX] = { + [IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN] = { + .vendor_id = INTEL_OUI, + .subcmd = IWL_MVM_VENDOR_CMD_ROAMING_FORBIDDEN_EVENT, + }, +}; + +void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) +{ + mvm->hw->wiphy->vendor_commands = iwl_mvm_vendor_commands; + mvm->hw->wiphy->n_vendor_commands = ARRAY_SIZE(iwl_mvm_vendor_commands); + mvm->hw->wiphy->vendor_events = iwl_mvm_vendor_events; + mvm->hw->wiphy->n_vendor_events = ARRAY_SIZE(iwl_mvm_vendor_events); +} + +void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool forbidden) +{ + struct sk_buff *msg; + + if (WARN_ON(!vif)) + return; + + msg = cfg80211_vendor_event_alloc(mvm->hw->wiphy, + ieee80211_vif_to_wdev(vif), + 200, IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN, + GFP_ATOMIC); + if (!msg) + return; + + if (nla_put(msg, IWL_MVM_VENDOR_ATTR_VIF_ADDR, + ETH_ALEN, vif->addr) || + nla_put_u8(msg, IWL_MVM_VENDOR_ATTR_ROAMING_FORBIDDEN, forbidden)) + goto nla_put_failure; + + cfg80211_vendor_event(msg, GFP_ATOMIC); + return; + + nla_put_failure: + kfree_skb(msg); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index c574f041f096..5178e852c5d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -510,16 +510,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = { MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _no_160, _cores, _cdb, _cfg, _name) \ + _rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \ .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ - .mac_step = _mac_step, .cdb = _cdb } + .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, _cfg, _name) + IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name) static const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) @@ -562,6 +562,7 @@ static
const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), @@ -665,97 +666,111 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL), IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), - IWL_DEV_INFO(0x2726, 0x1671, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x2726, 0x1672, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), + /* SO with GF2 */ + IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + + /* MA with GF2 */ + IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675i_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9560_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9461_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9461_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9462_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9462_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ @@ -763,176 +778,176 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - 
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QnJ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 
IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu with Hr */ @@ -940,303 +955,352 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_c0_hr_b0, iwl_ax203_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), /* QnJ with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), /* SnJ with Jf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, 
IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9560_name), /* SnJ with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_hr_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_hr_b0, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_fm_a0, iwl_ax231_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, 
IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* Bz */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_hr_b0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_gf_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_mr_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_fm_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET, iwl_cfg_gl_a0_fm_a0, iwl_bz_name), +/* BZ Z step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP, + 
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_bz_z0_gf_a0, iwl_bz_name), + +/* BNJ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_fm_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_gf_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_hr_b0, iwl_bz_name), + /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, 
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name) #endif /* CONFIG_IWLMVM */ @@ -1249,22 +1313,14 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { static int get_crf_id(struct iwl_trans *iwl_trans) { int ret = 0; - u32 wfpm_ctrl_addr; - u32 wfpm_otp_cfg_addr; u32 sd_reg_ver_addr; u32 cdb = 0; u32 val; - if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - wfpm_ctrl_addr = 
WFPM_CTRL_REG_GEN2; - wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR_GEN2; + if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; - /* Qu/Pu families have other addresses */ - } else { - wfpm_ctrl_addr = WFPM_CTRL_REG; - wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR; + else sd_reg_ver_addr = SD_REG_VER; - } if (!iwl_trans_grab_nic_access(iwl_trans)) { IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n"); @@ -1273,15 +1329,15 @@ static int get_crf_id(struct iwl_trans *iwl_trans) } /* Enable access to peripheral registers */ - val = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr); + val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); val |= ENABLE_WFPM; - iwl_write_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr, val); + iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); /* Read cdb info (also contains the jacket info if needed in the future */ - cdb = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_otp_cfg_addr); + cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); /* Map between crf id to rf id */ switch (REG_CRF_ID_TYPE(val)) { @@ -1337,11 +1393,15 @@ out: static const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, - u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores) + u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores) { + int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; - for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) { + if (!num_devices) + return NULL; + + for (i = num_devices - 1; i >= 0; i--) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; if (dev_info->device != (u16)IWL_CFG_ANY && @@ -1368,6 +1428,10 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, dev_info->cdb != cdb) continue; + if (dev_info->jacket != (u8)IWL_CFG_ANY && + dev_info->jacket != jacket) + continue; + if (dev_info->rf_id != (u8)IWL_CFG_ANY && dev_info->rf_id != rf_id) continue; @@ -1422,15 +1486,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * first trying to load the firmware etc. and potentially only * detecting any problems when the first interface is brought up. 
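[Editor's note] The reworked iwl_pci_find_dev_info() above gains a jacket column and still resolves conflicts by scanning the table from the last entry backwards, so later, more specific rows shadow earlier wildcard rows. Below is a standalone C sketch of that match loop; the table contents, the CFG_ANY values, and the reduced field set are illustrative stand-ins, not real iwlwifi IDs.

#include <stdio.h>
#include <stdint.h>

#define CFG_ANY		0xFFFF	/* assumed u16 wildcard, like (u16)IWL_CFG_ANY */
#define CFG_ANY_U8	0xFF	/* assumed u8 wildcard, like (u8)IWL_CFG_ANY */

struct dev_info {
	uint16_t device, subdevice, mac_type;
	uint8_t cdb, jacket;	/* jacket is the column this patch adds */
	const char *name;
};

/* Later entries are more specific and win because the scan runs backwards. */
static const struct dev_info table[] = {
	{ CFG_ANY, CFG_ANY, 0x42, CFG_ANY_U8, CFG_ANY_U8, "generic cfg" },
	{ 0x2726, 0x1671, 0x42, 0, 0, "specific cfg" },
};

static const struct dev_info *
find_dev_info(uint16_t device, uint16_t subdevice, uint16_t mac_type,
	      uint8_t cdb, uint8_t jacket)
{
	int i;

	for (i = (int)(sizeof(table) / sizeof(table[0])) - 1; i >= 0; i--) {
		const struct dev_info *d = &table[i];

		if (d->device != CFG_ANY && d->device != device)
			continue;
		if (d->subdevice != CFG_ANY && d->subdevice != subdevice)
			continue;
		if (d->mac_type != CFG_ANY && d->mac_type != mac_type)
			continue;
		if (d->cdb != CFG_ANY_U8 && d->cdb != cdb)
			continue;
		/* New in this patch: the jacket bit matches like any column. */
		if (d->jacket != CFG_ANY_U8 && d->jacket != jacket)
			continue;
		return d;
	}
	return NULL;
}

int main(void)
{
	const struct dev_info *d = find_dev_info(0x2726, 0x1671, 0x42, 0, 0);

	printf("%s\n", d ? d->name : "no match");
	return 0;
}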
*/ - ret = iwl_finish_nic_init(iwl_trans); - if (ret) - goto out_free_trans; - if (iwl_trans_grab_nic_access(iwl_trans)) { - /* all good */ - iwl_trans_release_nic_access(iwl_trans); - } else { - ret = -EIO; - goto out_free_trans; + ret = iwl_pcie_prepare_card_hw(iwl_trans); + if (!ret) { + ret = iwl_finish_nic_init(iwl_trans); + if (ret) + goto out_free_trans; + if (iwl_trans_grab_nic_access(iwl_trans)) { + /* all good */ + iwl_trans_release_nic_access(iwl_trans); + } else { + ret = -EIO; + goto out_free_trans; + } } iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); @@ -1442,14 +1509,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (iwl_trans->trans_cfg->rf_id && iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && - !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { + ret = -EINVAL; goto out_free_trans; + } dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_REV_TYPE(iwl_trans->hw_rev), - CSR_HW_REV_STEP(iwl_trans->hw_rev), + iwl_trans->hw_rev_step, CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), + CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), IWL_SUBDEVICE_NO_160(pdev->subsystem_device), IWL_SUBDEVICE_CORES(pdev->subsystem_device)); @@ -1488,21 +1558,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) iwl_trans->cfg = cfg_7265d; - if (cfg == &iwlax210_2ax_cfg_so_hr_a0) { - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) { - iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0; - } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) == - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) { - iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0; - } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) == - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) { - iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0; - } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) == - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) { - iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0; - } - } - /* * This is a hack to switch from Qu B0 to Qu C0. We need to * do this for all cfgs that use Qu B0, except for those using @@ -1563,6 +1618,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_trans; pci_set_drvdata(pdev, iwl_trans); + + /* try to get ownership so that we'll know if we don't own it */ + iwl_pcie_prepare_card_hw(iwl_trans); + iwl_trans->drv = iwl_drv_start(iwl_trans); if (IS_ERR(iwl_trans->drv)) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 14602d6d6699..8247014278f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2266,7 +2266,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + /* + * In some rare cases when the HW is in a bad state, we may + * get this interrupt too early, when prph_info is still NULL. + * So make sure that it's not NULL to prevent crashing. 
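[Editor's note] The comment added above documents a defensive check: the wakeup interrupt can fire before the driver has mapped the shared prph_info page, so the handler must tolerate a NULL pointer instead of crashing. A minimal userspace model of that guard, with invented type and constant names:

#include <stdio.h>
#include <stdint.h>

#define HW_INT_WAKEUP	0x1	/* stand-in for MSIX_HW_INT_CAUSES_REG_WAKEUP */

struct prph_info { uint32_t sleep_notif; };

struct trans_state {
	struct prph_info *prph_info;	/* NULL until shared memory is mapped */
};

/* Modeled on the patched iwl_pcie_irq_msix_handler(): the wakeup cause is
 * only acted on once prph_info is non-NULL, so an interrupt that arrives
 * before setup finishes is ignored rather than dereferencing NULL. */
static void handle_irq(struct trans_state *t, uint32_t inta_hw)
{
	if ((inta_hw & HW_INT_WAKEUP) && t->prph_info)
		printf("sleep_notif=%u\n", t->prph_info->sleep_notif);
}

int main(void)
{
	struct trans_state t = { .prph_info = NULL };
	struct prph_info info = { .sleep_notif = 2 };

	handle_irq(&t, HW_INT_WAKEUP);	/* early IRQ: safely ignored */
	t.prph_info = &info;
	handle_irq(&t, HW_INT_WAKEUP);	/* after init: processed */
	return 0;
}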
+ */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { u32 sleep_notif = le32_to_cpu(trans_pcie->prph_info->sleep_notif); if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 645cb4dd4e5a..0febdcacbd42 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -81,7 +81,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); - iwl_trans_sw_reset(trans); + iwl_trans_sw_reset(trans, false); /* * Clear "initialization complete" bit to move adapter from @@ -105,9 +105,12 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE); - else + else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); + else + iwl_write32(trans, CSR_DOORBELL_VECTOR, + UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); /* wait 200ms */ ret = wait_event_timeout(trans_pcie->fw_reset_waitq, @@ -166,7 +169,8 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); - iwl_trans_sw_reset(trans); + /* re-take ownership to prevent other users from stealing the device */ + iwl_trans_sw_reset(trans, true); /* * Upon stop, the IVAR table gets erased, so msi-x won't @@ -196,9 +200,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) * interrupt */ iwl_enable_rfkill_int(trans); - - /* re-take ownership to prevent other users from stealing the device */ - iwl_pcie_prepare_card_hw(trans); } void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 1efb53f78a62..a63386a01232 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -24,6 +24,7 @@ #include "fw/error-dump.h" #include "fw/dbg.h" #include "fw/api/tx.h" +#include "mei/iwl-mei.h" #include "internal.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" @@ -126,7 +127,8 @@ out: kfree(buf); } -static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) +static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, + bool retake_ownership) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) @@ -136,6 +138,11 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); usleep_range(5000, 6000); + + if (retake_ownership) + return iwl_pcie_prepare_card_hw(trans); + + return 0; } static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) @@ -381,9 +388,11 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); - iwl_trans_pcie_sw_reset(trans); + ret = iwl_trans_pcie_sw_reset(trans, true); + + if (!ret) + ret = iwl_finish_nic_init(trans); - ret = iwl_finish_nic_init(trans); if (WARN_ON(ret)) { /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, @@ -408,7 +417,10 @@ 
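[Editor's note] The reset-handshake hunk nearby now selects a different doorbell per device family before waiting for the firmware's acknowledgement. A compilable sketch of just the dispatch, assuming a simplified family enum whose ordering mirrors the driver's comparisons, and printing register names instead of performing MMIO writes:

#include <stdio.h>

enum family { FAMILY_22000, FAMILY_AX210, FAMILY_BZ };

/* Modeled on iwl_trans_pcie_fw_reset_handshake(): pre-AX210 parts signal
 * the firmware through the NMI register, AX210 through the ISR6 doorbell,
 * and newer (Bz) parts through a CSR doorbell vector. */
static void fw_reset_handshake(enum family fam)
{
	if (fam < FAMILY_AX210)
		printf("write UREG_NIC_SET_NMI_DRIVER\n");
	else if (fam == FAMILY_AX210)
		printf("write UREG_DOORBELL_TO_ISR6\n");
	else
		printf("write CSR_DOORBELL_VECTOR\n");
	/* The real code then waits up to 200ms for the reset-done event. */
}

int main(void)
{
	fw_reset_handshake(FAMILY_22000);
	fw_reset_handshake(FAMILY_AX210);
	fw_reset_handshake(FAMILY_BZ);
	return 0;
}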
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) apmg_xtal_cfg_reg | SHR_APMG_XTAL_CFG_XTAL_ON_REQ); - iwl_trans_pcie_sw_reset(trans); + ret = iwl_trans_pcie_sw_reset(trans, true); + if (ret) + IWL_ERR(trans, + "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n"); /* Enable LP XTAL by indirect access through CSR */ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); @@ -514,7 +526,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) return; } - iwl_trans_pcie_sw_reset(trans); + iwl_trans_pcie_sw_reset(trans, false); /* * Clear "initialization complete" bit to move adapter from @@ -594,8 +606,10 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) ret = iwl_pcie_set_hw_ready(trans); /* If the card is ready, exit 0 */ - if (ret >= 0) + if (ret >= 0) { + trans->csme_own = false; return 0; + } iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); @@ -608,8 +622,22 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) + if (ret >= 0) { + trans->csme_own = false; return 0; + } + + if (iwl_mei_is_connected()) { + IWL_DEBUG_INFO(trans, + "Couldn't prepare the card but SAP is connected\n"); + trans->csme_own = true; + if (trans->trans_cfg->device_family != + IWL_DEVICE_FAMILY_9000) + IWL_ERR(trans, + "SAP not supported for this NIC family\n"); + + return -EBUSY; + } usleep_range(200, 1000); t += 200; @@ -1244,7 +1272,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); - iwl_trans_pcie_sw_reset(trans); + /* re-take ownership to prevent other users from stealing the device */ + iwl_trans_pcie_sw_reset(trans, true); /* * Upon stop, the IVAR table gets erased, so msi-x won't @@ -1274,9 +1303,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) * interrupt */ iwl_enable_rfkill_int(trans); - - /* re-take ownership to prevent other users from stealing the device */ - iwl_pcie_prepare_card_hw(trans); } void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) @@ -1482,33 +1508,54 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, true); } +static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND : + UREG_DOORBELL_TO_ISR6_RESUME); + } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, + suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : + CSR_IPC_SLEEP_CONTROL_RESUME); + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + UREG_DOORBELL_TO_ISR6_SLEEP_CTRL); + } else { + return 0; + } + + ret = wait_event_timeout(trans_pcie->sx_waitq, + trans_pcie->sx_complete, 2 * HZ); + + /* Invalidate it toward next suspend or resume */ + trans_pcie->sx_complete = false; + + if (!ret) { + IWL_ERR(trans, "Timeout %s D3\n", + suspend ? 
"entering" : "exiting"); + return -ETIMEDOUT; + } + + return 0; +} + static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { int ret; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!reset) /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, - UREG_DOORBELL_TO_ISR6_SUSPEND); - - ret = wait_event_timeout(trans_pcie->sx_waitq, - trans_pcie->sx_complete, 2 * HZ); - /* - * Invalidate it toward resume. - */ - trans_pcie->sx_complete = false; + ret = iwl_pcie_d3_handshake(trans, true); + if (ret) + return ret; - if (!ret) { - IWL_ERR(trans, "Timeout entering D3\n"); - return -ETIMEDOUT; - } - } iwl_pcie_d3_complete_suspend(trans, test, reset); return 0; @@ -1525,6 +1572,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, if (test) { iwl_enable_interrupts(trans); *status = IWL_D3_STATUS_ALIVE; + ret = 0; goto out; } @@ -1573,25 +1621,10 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, *status = IWL_D3_STATUS_ALIVE; out: - if (*status == IWL_D3_STATUS_ALIVE && - trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - trans_pcie->sx_complete = false; - iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, - UREG_DOORBELL_TO_ISR6_RESUME); - - ret = wait_event_timeout(trans_pcie->sx_waitq, - trans_pcie->sx_complete, 2 * HZ); - /* - * Invalidate it toward next suspend. - */ - trans_pcie->sx_complete = false; + if (*status == IWL_D3_STATUS_ALIVE) + ret = iwl_pcie_d3_handshake(trans, false); - if (!ret) { - IWL_ERR(trans, "Timeout exiting D3\n"); - return -ETIMEDOUT; - } - } - return 0; + return ret; } static void @@ -1778,9 +1811,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - iwl_trans_pcie_sw_reset(trans); - - return 0; + return iwl_trans_pcie_sw_reset(trans, true); } static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) @@ -1800,7 +1831,9 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) if (err) return err; - iwl_trans_pcie_sw_reset(trans); + err = iwl_trans_pcie_sw_reset(trans, true); + if (err) + return err; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && trans->trans_cfg->integrated) { @@ -3599,8 +3632,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * in the old format. 
*/ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) - trans->hw_rev = (trans->hw_rev & 0xfff0) | - (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); + trans->hw_rev_step = trans->hw_rev & 0xF; + else + trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 451b06069350..0730657d54bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1072,6 +1072,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); + txq->tfds = NULL; error: if (txq->entries && cmd_queue) for (i = 0; i < slots_num; i++) @@ -1752,8 +1753,11 @@ static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans, } if (test_bit(STATUS_FW_ERROR, &trans->status)) { - IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); - dump_stack(); + if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, + &trans->status)) { + IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); + dump_stack(); + } ret = -EIO; goto cancel; } diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index e459e7192ae9..b74f4cb5d6d3 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -1815,8 +1815,9 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev) memset(&txdesc, 0, sizeof(txdesc)); /* skb->data starts with txdesc->frame_control */ - hdr_len = 24; - skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len); + hdr_len = sizeof(txdesc.header); + BUILD_BUG_ON(hdr_len != 24); + skb_copy_from_linear_data(skb, &txdesc.header, hdr_len); if (ieee80211_is_data(txdesc.frame_control) && ieee80211_has_a4(txdesc.frame_control) && skb->len >= 30) { diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h index dd2603d9b5d3..c25cd21d18bd 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h +++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h @@ -115,12 +115,14 @@ struct hfa384x_tx_frame { __le16 tx_control; /* HFA384X_TX_CTRL_ flags */ /* 802.11 */ - __le16 frame_control; /* parts not used */ - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; /* filled by firmware */ - u8 addr3[ETH_ALEN]; - __le16 seq_ctrl; /* filled by firmware */ + struct_group(header, + __le16 frame_control; /* parts not used */ + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; /* filled by firmware */ + u8 addr3[ETH_ALEN]; + __le16 seq_ctrl; /* filled by firmware */ + ); u8 addr4[ETH_ALEN]; __le16 data_len; diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 873fea59894f..8414aa208655 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -431,11 +431,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) * Clear manually, ieee80211_tx_info_clear_status would * clear the counts too and we need them. 
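[Editor's note] The hostap and p54 hunks in this region replace hand-computed offsets with struct_group() and memset_after(), which derive the affected byte range from the struct layout instead of a hardcoded 20- or 24-byte constant. A userspace approximation using offsetof(); the struct and field names are invented for the demo.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Members that are cleared together live in one named sub-struct, so the
 * size comes from the compiler rather than hand-maintained offset math. */
struct tx_status {
	int rates;		/* preserved, like info->status.rates */
	struct {		/* everything past this point gets cleared */
		int ack_signal;
		int ampdu_len;
	} tail;
};

int main(void)
{
	struct tx_status st = { .rates = 3, .tail = { 42, 7 } };

	/* Rough equivalent of memset_after(&info->status, 0, rates):
	 * zero from the end of 'rates' to the end of the struct. */
	memset((char *)&st + offsetof(struct tx_status, tail), 0,
	       sizeof(st) - offsetof(struct tx_status, tail));
	printf("rates=%d ack=%d\n", st.rates, st.tail.ack_signal); /* 3 0 */
	return 0;
}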
*/ - memset(&info->status.ack_signal, 0, - sizeof(struct ieee80211_tx_info) - - offsetof(struct ieee80211_tx_info, status.ack_signal)); - BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, - status.ack_signal) != 20); + memset_after(&info->status, 0, rates); if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) pad = entry_data->align[0]; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 23219f3747f8..0307a6677907 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1276,7 +1276,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); /* If the queue contains MAX_QUEUE skb's drop some */ if (skb_queue_len(&data->pending) >= MAX_QUEUE) { - /* Droping until WARN_QUEUE level */ + /* Dropping until WARN_QUEUE level */ while (skb_queue_len(&data->pending) >= WARN_QUEUE) { ieee80211_free_txskb(hw, skb_dequeue(&data->pending)); data->tx_dropped++; diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h index dfa22468b14a..ceff4b92e7a1 100644 --- a/drivers/net/wireless/marvell/libertas/host.h +++ b/drivers/net/wireless/marvell/libertas/host.h @@ -308,10 +308,12 @@ struct txpd { __le32 tx_packet_location; /* Tx packet length */ __le16 tx_packet_length; - /* First 2 byte of destination MAC address */ - u8 tx_dest_addr_high[2]; - /* Last 4 byte of destination MAC address */ - u8 tx_dest_addr_low[4]; + struct_group_attr(tx_dest_addr, __packed, + /* First 2 byte of destination MAC address */ + u8 tx_dest_addr_high[2]; + /* Last 4 byte of destination MAC address */ + u8 tx_dest_addr_low[4]; + ); /* Pkt Priority */ u8 priority; /* Pkt Trasnit Power control */ diff --git a/drivers/net/wireless/marvell/libertas/tx.c b/drivers/net/wireless/marvell/libertas/tx.c index aeb481740df6..27304a98787d 100644 --- a/drivers/net/wireless/marvell/libertas/tx.c +++ b/drivers/net/wireless/marvell/libertas/tx.c @@ -113,6 +113,7 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) p802x_hdr = skb->data; pkt_len = skb->len; + BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN); if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; @@ -124,10 +125,10 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) pkt_len -= sizeof(*rtap_hdr); /* copy destination address from 802.11 header */ - memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); + memcpy(&txpd->tx_dest_addr, p802x_hdr + 4, ETH_ALEN); } else { /* copy destination address from 802.3 header */ - memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); + memcpy(&txpd->tx_dest_addr, p802x_hdr, ETH_ALEN); } txpd->tx_packet_length = cpu_to_le16(pkt_len); diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h index 5d726545d987..631b5da09f86 100644 --- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h +++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h @@ -268,10 +268,12 @@ struct txpd { __le32 tx_packet_location; /* Tx packet length */ __le16 tx_packet_length; - /* First 2 byte of destination MAC address */ - u8 tx_dest_addr_high[2]; - /* Last 4 byte of destination MAC address */ - u8 tx_dest_addr_low[4]; + struct_group_attr(tx_dest_addr, __packed, + /* First 2 byte of destination MAC address */ + u8 tx_dest_addr_high[2]; + /* Last 4 byte of destination MAC address */ + u8 
tx_dest_addr_low[4]; + ); /* Pkt Priority */ u8 priority; /* Pkt Trasnit Power control */ @@ -280,7 +282,7 @@ struct txpd { u8 pktdelay_2ms; /* reserved */ u8 reserved1; -}; +} __packed; /* RxPD Descriptor */ struct rxpd { @@ -311,7 +313,7 @@ struct rxpd { /* Pkt Priority */ u8 priority; u8 reserved[3]; -}; +} __packed; struct cmd_header { __le16 command; @@ -377,14 +379,14 @@ struct cmd_ds_mac_control { struct cmd_header hdr; __le16 action; u16 reserved; -}; +} __packed; struct cmd_ds_802_11_mac_address { struct cmd_header hdr; __le16 action; uint8_t macadd[ETH_ALEN]; -}; +} __packed; struct cmd_ds_mac_multicast_addr { struct cmd_header hdr; @@ -392,27 +394,27 @@ struct cmd_ds_mac_multicast_addr { __le16 action; __le16 nr_of_adrs; u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; -}; +} __packed; struct cmd_ds_set_mode { struct cmd_header hdr; __le16 mode; -}; +} __packed; struct cmd_ds_set_bssid { struct cmd_header hdr; u8 bssid[6]; u8 activate; -}; +} __packed; struct cmd_ds_802_11_radio_control { struct cmd_header hdr; __le16 action; __le16 control; -}; +} __packed; struct cmd_ds_802_11_rf_channel { @@ -423,20 +425,20 @@ struct cmd_ds_802_11_rf_channel { __le16 rftype; /* unused */ __le16 reserved; /* unused */ u8 channellist[32]; /* unused */ -}; +} __packed; struct cmd_ds_set_boot2_ver { struct cmd_header hdr; __le16 action; __le16 version; -}; +} __packed; struct cmd_ds_802_11_reset { struct cmd_header hdr; __le16 action; -}; +} __packed; struct cmd_ds_802_11_beacon_control { struct cmd_header hdr; @@ -444,14 +446,14 @@ struct cmd_ds_802_11_beacon_control { __le16 action; __le16 beacon_enable; __le16 beacon_period; -}; +} __packed; struct cmd_ds_802_11_beacon_set { struct cmd_header hdr; __le16 len; u8 beacon[MRVL_MAX_BCN_SIZE]; -}; +} __packed; struct cmd_ctrl_node; diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 71492211904b..02a1e1f547d8 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -232,7 +232,8 @@ static void lbtf_tx_work(struct work_struct *work) ieee80211_get_tx_rate(priv->hw, info)->hw_value); /* copy destination address from 802.11 header */ - memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4, + BUILD_BUG_ON(sizeof(txpd->tx_dest_addr) != ETH_ALEN); + memcpy(&txpd->tx_dest_addr, skb->data + sizeof(struct txpd) + 4, ETH_ALEN); txpd->tx_packet_length = cpu_to_le16(len); txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 2ff23ab259ab..63c25c69ed2b 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -2071,9 +2071,11 @@ struct mwifiex_ie_types_robust_coex { __le32 mode; } __packed; +#define MWIFIEX_VERSION_STR_LENGTH 128 + struct host_cmd_ds_version_ext { u8 version_str_sel; - char version_str[128]; + char version_str[MWIFIEX_VERSION_STR_LENGTH]; } __packed; struct host_cmd_ds_mgmt_frame_reg { diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index f006a3d72b40..88c72d1827a0 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -332,7 +332,7 @@ void mwifiex_set_trans_start(struct net_device *dev) int i; for (i = 0; i < dev->num_tx_queues; i++) - netdev_get_tx_queue(dev, i)->trans_start = jiffies; + txq_trans_cond_update(netdev_get_tx_queue(dev, 
i)); netif_trans_update(dev); } diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 19b996c6a260..ace7371c4773 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -226,6 +226,23 @@ exit_rx_proc: return 0; } +static void maybe_quirk_fw_disable_ds(struct mwifiex_adapter *adapter) +{ + struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + struct mwifiex_ver_ext ver_ext; + + if (test_and_set_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &adapter->work_flags)) + return; + + memset(&ver_ext, 0, sizeof(ver_ext)); + ver_ext.version_str_sel = 1; + if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT, + HostCmd_ACT_GEN_GET, 0, &ver_ext, false)) { + mwifiex_dbg(priv->adapter, MSG, + "Checking hardware revision failed.\n"); + } +} + /* * The main process. * @@ -356,6 +373,7 @@ process_start: if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) { adapter->hw_status = MWIFIEX_HW_STATUS_READY; mwifiex_init_fw_complete(adapter); + maybe_quirk_fw_disable_ds(adapter); } } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 90012cbcfd15..332dd1c8db35 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -524,6 +524,7 @@ enum mwifiex_adapter_work_flags { MWIFIEX_IS_SUSPENDED, MWIFIEX_IS_HS_CONFIGURED, MWIFIEX_IS_HS_ENABLING, + MWIFIEX_IS_REQUESTING_FW_VEREXT, }; struct mwifiex_band_config { @@ -646,7 +647,7 @@ struct mwifiex_private { struct wireless_dev wdev; struct mwifiex_chan_freq_power cfp; u32 versionstrsel; - char version_str[128]; + char version_str[MWIFIEX_VERSION_STR_LENGTH]; #ifdef CONFIG_DEBUG_FS struct dentry *dfs_dev_dir; #endif @@ -1055,6 +1056,8 @@ struct mwifiex_adapter { void *devdump_data; int devdump_len; struct timer_list devdump_timer; + + bool ignore_btcoex_events; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index c3f5583ea70d..d5fb29400bad 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -3152,6 +3152,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter) if (ret) goto err_alloc_buffers; + if (pdev->device == PCIE_DEVICE_ID_MARVELL_88W8897) + adapter->ignore_btcoex_events = true; + return 0; err_alloc_buffers: diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 6b5d35d9e69f..1a4ae8a42a31 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -708,11 +708,35 @@ static int mwifiex_ret_ver_ext(struct mwifiex_private *priv, { struct host_cmd_ds_version_ext *ver_ext = &resp->params.verext; + if (test_and_clear_bit(MWIFIEX_IS_REQUESTING_FW_VEREXT, &priv->adapter->work_flags)) { + if (strncmp(ver_ext->version_str, "ChipRev:20, BB:9b(10.00), RF:40(21)", + MWIFIEX_VERSION_STR_LENGTH) == 0) { + struct mwifiex_ds_auto_ds auto_ds = { + .auto_ds = DEEP_SLEEP_OFF, + }; + + mwifiex_dbg(priv->adapter, MSG, + "Bad HW revision detected, disabling deep sleep\n"); + + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH, + DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, false)) { + mwifiex_dbg(priv->adapter, MSG, + "Disabling deep sleep failed.\n"); + } + } + + return 0; + } + if (version_ext) { version_ext->version_str_sel = 
ver_ext->version_str_sel; memcpy(version_ext->version_str, ver_ext->version_str, - sizeof(char) * 128); - memcpy(priv->version_str, ver_ext->version_str, 128); + MWIFIEX_VERSION_STR_LENGTH); + memcpy(priv->version_str, ver_ext->version_str, + MWIFIEX_VERSION_STR_LENGTH); + + /* Ensure the version string from the firmware is 0-terminated */ + priv->version_str[MWIFIEX_VERSION_STR_LENGTH - 1] = '\0'; } return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 68c63268e2e6..7d42c5d2dbf6 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -365,10 +365,12 @@ static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { sta_ptr->tx_pause = tp->tx_pause; + spin_unlock_bh(&priv->sta_list_spinlock); mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_unlock_bh(&priv->sta_list_spinlock); } - spin_unlock_bh(&priv->sta_list_spinlock); } } @@ -400,11 +402,13 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { sta_ptr->tx_pause = tp->tx_pause; + spin_unlock_bh(&priv->sta_list_spinlock); mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_unlock_bh(&priv->sta_list_spinlock); } - spin_unlock_bh(&priv->sta_list_spinlock); } } } @@ -1058,6 +1062,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_BT_COEX_WLAN_PARA_CHANGE: dev_dbg(adapter->dev, "EVENT: BT coex wlan param update\n"); + if (adapter->ignore_btcoex_events) + break; + mwifiex_bt_coex_wlan_param_update_event(priv, adapter->event_skb); break; diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 9736aa0ab7fd..8f01fcbe9396 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -130,7 +130,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, default: mwifiex_dbg(adapter, ERROR, "unknown recv_type %#x\n", recv_type); - return -1; + ret = -1; + goto exit_restore_skb; } break; case MWIFIEX_USB_EP_DATA: diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 529e325498cd..864a2ba9efee 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -4225,9 +4225,11 @@ struct mwl8k_cmd_set_key { __le32 key_info; __le32 key_id; __le16 key_len; - __u8 key_material[MAX_ENCR_KEY_LENGTH]; - __u8 tkip_tx_mic_key[MIC_KEY_LENGTH]; - __u8 tkip_rx_mic_key[MIC_KEY_LENGTH]; + struct { + __u8 key_material[MAX_ENCR_KEY_LENGTH]; + __u8 tkip_tx_mic_key[MIC_KEY_LENGTH]; + __u8 tkip_rx_mic_key[MIC_KEY_LENGTH]; + } tkip; __le16 tkip_rsc_low; __le32 tkip_rsc_high; __le16 tkip_tsc_low; @@ -4375,7 +4377,7 @@ static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw, goto done; } - memcpy(cmd->key_material, key->key, keymlen); + memcpy(&cmd->tkip, key->key, keymlen); cmd->action = cpu_to_le32(action); rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 79ab850a45a2..c78ae4b89761 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -34,4 +34,4 @@ 
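[Editor's note] The mwifiex version-string hunk above bounds every copy by MWIFIEX_VERSION_STR_LENGTH and then force-terminates the buffer, since the firmware is not trusted to NUL-terminate what it returns. A self-contained model of that copy pattern; the constant mirrors the driver's, the rest is illustrative.

#include <stdio.h>
#include <string.h>

#define VERSION_STR_LENGTH 128	/* mirrors MWIFIEX_VERSION_STR_LENGTH */

/* Fixed-size copy plus explicit terminator, as in the patched
 * mwifiex_ret_ver_ext(). */
static void store_version(char dst[VERSION_STR_LENGTH], const char *fw_src)
{
	memcpy(dst, fw_src, VERSION_STR_LENGTH);
	dst[VERSION_STR_LENGTH - 1] = '\0';
}

int main(void)
{
	char fw_buf[VERSION_STR_LENGTH];
	char version[VERSION_STR_LENGTH];

	/* Simulate an unterminated firmware buffer. */
	memset(fw_buf, 'A', sizeof(fw_buf));
	store_version(version, fw_buf);
	printf("len=%zu\n", strlen(version));	/* 127, never runs off the end */
	return 0;
}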
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/ obj-$(CONFIG_MT7603E) += mt7603/ obj-$(CONFIG_MT7615_COMMON) += mt7615/ obj-$(CONFIG_MT7915E) += mt7915/ -obj-$(CONFIG_MT7921E) += mt7921/ +obj-$(CONFIG_MT7921_COMMON) += mt7921/ diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index b8bcf22a07fd..47e9911ee9fe 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -82,7 +82,7 @@ static int mt76_rx_queues_read(struct seq_file *s, void *data) queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued; seq_printf(s, " %9d | %9d | %9d | %9d |\n", - i, q->queued, q->head, q->tail); + i, queued, q->head, q->tail); } return 0; diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 5e1c1506a4c6..3a9af8931c35 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -572,9 +572,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) if (data_len < len + q->buf_offset) { dev_kfree_skb(q->rx_head); q->rx_head = NULL; - - skb_free_frag(data); - continue; + goto free_frag; } if (q->rx_head) { @@ -582,11 +580,14 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) continue; } + if (!more && dev->drv->rx_check && + !(dev->drv->rx_check(dev, data, len))) + goto free_frag; + skb = build_skb(data, q->buf_size); - if (!skb) { - skb_free_frag(data); - continue; - } + if (!skb) + goto free_frag; + skb_reserve(skb, q->buf_offset); if (q == &dev->q_rx[MT_RXQ_MCU]) { @@ -603,6 +604,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) } dev->drv->rx_skb(dev, q - dev->q_rx, skb); + continue; + +free_frag: + skb_free_frag(data); } mt76_dma_rx_fill(dev, q); diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 2d58aa31db93..a499861918fa 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -65,6 +65,8 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) offset = be32_to_cpup(list); ret = mtd_read(mtd, offset, len, &retlen, eep); put_mtd_device(mtd); + if (mtd_is_bitflip(ret)) + ret = 0; if (ret) { dev_err(dev->dev, "reading EEPROM from mtd %s failed: %i\n", part, ret); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 62807dc311c1..8bb1c7ab5b50 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -185,7 +185,6 @@ const struct cfg80211_sar_capa mt76_sar_capa = { .num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges), .freq_ranges = &mt76_sar_freq_ranges[0], }; -EXPORT_SYMBOL_GPL(mt76_sar_capa); static int mt76_led_init(struct mt76_dev *dev) { @@ -393,7 +392,7 @@ mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband, phy->hw->wiphy->bands[band] = NULL; } -static void +static int mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) { struct mt76_dev *dev = phy->dev; @@ -411,8 +410,15 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL); - wiphy->available_antennas_tx = dev->phy.antenna_mask; - wiphy->available_antennas_rx = dev->phy.antenna_mask; + wiphy->available_antennas_tx = phy->antenna_mask; + wiphy->available_antennas_rx = 
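
The mt76_dma_rx_process() hunk above funnels what used to be three separate skb_free_frag()-and-continue sites into a single free_frag label, which is also what lets the new drv->rx_check() hook bail out without duplicating cleanup. A minimal sketch of that single-error-path loop shape, with stand-in helpers rather than the mt76 API:

#include <stdio.h>
#include <stdlib.h>

static int validate(const void *buf, size_t len) { return buf && len >= 4; }

static int deliver(const void *buf, size_t len)
{
	if (len % 2)          /* pretend build_skb() failed */
		return 0;
	printf("delivered %zu bytes\n", len);
	return 1;
}

static void process_bufs(void *bufs[], const size_t lens[], int n)
{
	for (int i = 0; i < n; i++) {
		void *data = bufs[i];

		if (!validate(data, lens[i]))
			goto free_frag;
		if (!deliver(data, lens[i]))
			goto free_frag;
		continue;

free_frag:
		free(data);   /* every failure branch lands on one free */
		bufs[i] = NULL;
	}
}

int main(void)
{
	void *bufs[2] = { malloc(8), malloc(5) };
	size_t lens[2] = { 8, 5 };

	process_bufs(bufs, lens, 2);
	free(bufs[0]);        /* free(NULL) is a no-op for the failed one */
	free(bufs[1]);
	return 0;
}

Centralizing the free means a future error branch needs only a goto, not its own copy of the release logic.
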
phy->antenna_mask; + + wiphy->sar_capa = &mt76_sar_capa; + phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges, + sizeof(struct mt76_freq_range_power), + GFP_KERNEL); + if (!phy->frp) + return -ENOMEM; hw->txq_data_size = sizeof(struct mt76_txq); hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; @@ -432,6 +438,8 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + + return 0; } struct mt76_phy * @@ -472,7 +480,9 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht, { int ret; - mt76_phy_init(phy, phy->hw); + ret = mt76_phy_init(phy, phy->hw); + if (ret) + return ret; if (phy->cap.has_2ghz) { ret = mt76_init_sband_2g(phy, rates, n_rates); @@ -591,7 +601,9 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, int ret; dev_set_drvdata(dev->dev, dev); - mt76_phy_init(phy, hw); + ret = mt76_phy_init(phy, hw); + if (ret) + return ret; if (phy->cap.has_2ghz) { ret = mt76_init_sband_2g(phy, rates, n_rates); @@ -1163,10 +1175,12 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) if (ps) set_bit(MT_WCID_FLAG_PS, &wcid->flags); - else - clear_bit(MT_WCID_FLAG_PS, &wcid->flags); dev->drv->sta_ps(dev, sta, ps); + + if (!ps) + clear_bit(MT_WCID_FLAG_PS, &wcid->flags); + ieee80211_sta_ps_transition(sta, ps); } @@ -1348,6 +1362,59 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(mt76_get_txpower); +int mt76_init_sar_power(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt76_phy *phy = hw->priv; + const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa; + int i; + + if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs) + return -EINVAL; + + for (i = 0; i < sar->num_sub_specs; i++) { + u32 index = sar->sub_specs[i].freq_range_index; + /* SAR specifies power limitation in 0.25 dBm */ + s32 power = sar->sub_specs[i].power >> 1; + + if (power > 127 || power < -127) + power = 127; + + phy->frp[index].range = &capa->freq_ranges[index]; + phy->frp[index].power = power; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt76_init_sar_power); + +int mt76_get_sar_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + int power) +{ + const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa; + int freq, i; + + if (!capa || !phy->frp) + return power; + + if (power > 127 || power < -127) + power = 127; + + freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band); + for (i = 0 ; i < capa->num_freq_ranges; i++) { + if (phy->frp[i].range && + freq >= phy->frp[i].range->start_freq && + freq < phy->frp[i].range->end_freq) { + power = min_t(int, phy->frp[i].power, power); + break; + } + } + + return power; +} +EXPORT_SYMBOL_GPL(mt76_get_sar_power); + static void __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { @@ -1494,7 +1561,6 @@ EXPORT_SYMBOL_GPL(mt76_init_queue); u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx) { int offset = 0; - struct ieee80211_rate *rate; if (phy->chandef.chan->band != NL80211_BAND_2GHZ) offset = 4; @@ -1503,9 +1569,11 @@ u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx) if (rateidx < 0) rateidx = 0; - rate = &mt76_rates[offset + rateidx]; + rateidx += offset; + if (rateidx >= ARRAY_SIZE(mt76_rates)) + rateidx = offset; - return rate->hw_value; + return mt76_rates[rateidx].hw_value; } EXPORT_SYMBOL_GPL(mt76_calculate_default_rate); diff --git
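
mt76_init_sar_power() and mt76_get_sar_power() above move the SAR bookkeeping into the core: the cfg80211 limit arrives in 0.25 dBm steps, is halved into the 0.5 dBm units mt76 works in and clamped to the s8 range, and is later min()'ed against the requested power for the matching frequency range. A small standalone sketch of that arithmetic, with values and names invented for illustration:

#include <stdio.h>

struct freq_range { int start_mhz, end_mhz; int power; /* 0.5 dBm units */ };

static int sar_limit_power(const struct freq_range *frp, int n,
			   int freq_mhz, int power)
{
	for (int i = 0; i < n; i++)
		if (freq_mhz >= frp[i].start_mhz && freq_mhz < frp[i].end_mhz)
			return power < frp[i].power ? power : frp[i].power;
	return power; /* no SAR entry covers this channel */
}

int main(void)
{
	/* userspace sets an 18 dBm SAR cap on 2.4 GHz:
	 * 72 quarter-dBm >> 1 = 36 half-dBm */
	struct freq_range frp[] = { { 2402, 2494, 72 >> 1 } };
	int requested = 20 * 2;	/* regulatory/config power: 20 dBm */

	printf("tx power: %d (0.5 dBm units)\n",
	       sar_limit_power(frp, 1, 2437, requested));
	return 0;
}

This prints 36, i.e. the 18 dBm SAR cap wins over the 20 dBm request on channel 6.
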
a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index e2da720a91b6..404c3d1a70d6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -373,6 +373,8 @@ struct mt76_driver_ops { bool (*tx_status_data)(struct mt76_dev *dev, u8 *update); + bool (*rx_check)(struct mt76_dev *dev, void *data, int len); + void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); @@ -495,6 +497,8 @@ struct mt76_usb { }; #define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE) +#define MT76S_NUM_TX_ENTRIES 256 +#define MT76S_NUM_RX_ENTRIES 512 struct mt76_sdio { struct mt76_worker txrx_worker; struct mt76_worker status_worker; @@ -599,6 +603,8 @@ struct mt76_testmode_data { u8 tx_power[4]; u8 tx_power_control; + u8 addr[3][ETH_ALEN]; + u32 tx_pending; u32 tx_queued; u16 tx_queued_limit; @@ -808,7 +814,6 @@ struct mt76_ethtool_worker_info { } extern struct ieee80211_rate mt76_rates[12]; -extern const struct cfg80211_sar_capa mt76_sar_capa; #define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) #define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__) @@ -1157,6 +1162,11 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy); int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm); +int mt76_init_sar_power(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar); +int mt76_get_sar_power(struct mt76_phy *phy, + struct ieee80211_channel *chan, + int power); void mt76_csa_check(struct mt76_dev *dev); void mt76_csa_finish(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index fe03e31989bb..a272d64808c3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -202,10 +202,11 @@ void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort) FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) | FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue)); - WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, - 0, 5000)); + mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 15000); } + WARN_ON_ONCE(mt76_rr(dev, MT_DMA_FQCR0) & MT_DMA_FQCR0_BUSY); + mt76_wr(dev, MT_TX_ABORT, 0); mt7603_wtbl_set_skip_tx(dev, idx, false); @@ -525,6 +526,10 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR) status->flag |= RX_FLAG_MMIC_ERROR; + /* ICV error or CCMP/BIP/WPI MIC error */ + if (rxd2 & MT_RXD2_NORMAL_ICV_ERR) + status->flag |= RX_FLAG_ONLY_MONITOR; + if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) { status->flag |= RX_FLAG_DECRYPTED; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 7ac4cd247a73..2b546bc05d82 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -133,13 +133,15 @@ void mt7603_init_edcca(struct mt7603_dev *dev) } static int -mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def) +mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def) { + struct mt7603_dev *dev = hw->priv; u8 *rssi_data = (u8 *)dev->mt76.eeprom.data; int idx, ret; u8 bw = MT_BW_20; bool failed = false; + ieee80211_stop_queues(hw); cancel_delayed_work_sync(&dev->mphy.mac_work); tasklet_disable(&dev->mt76.pre_tbtt_tasklet); @@ -205,9 +207,28 @@ out: if (failed) 
mt7603_mac_work(&dev->mphy.mac_work.work); + ieee80211_wake_queues(hw); + return ret; } +static int mt7603_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt7603_dev *dev = hw->priv; + struct mt76_phy *mphy = &dev->mphy; + int err; + + if (!cfg80211_chandef_valid(&mphy->chandef)) + return -EINVAL; + + err = mt76_init_sar_power(hw, sar); + if (err) + return err; + + return mt7603_set_channel(hw, &mphy->chandef); +} + static int mt7603_config(struct ieee80211_hw *hw, u32 changed) { @@ -215,11 +236,8 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed) int ret = 0; if (changed & (IEEE80211_CONF_CHANGE_CHANNEL | - IEEE80211_CONF_CHANGE_POWER)) { - ieee80211_stop_queues(hw); - ret = mt7603_set_channel(dev, &hw->conf.chandef); - ieee80211_wake_queues(hw); - } + IEEE80211_CONF_CHANGE_POWER)) + ret = mt7603_set_channel(hw, &hw->conf.chandef); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { mutex_lock(&dev->mt76.mutex); @@ -700,6 +718,7 @@ const struct ieee80211_ops mt7603_ops = { .set_tim = mt76_set_tim, .get_survey = mt76_get_survey, .get_antenna = mt76_get_antenna, + .set_sar_specs = mt7603_set_sar_specs, }; MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index 6abfe6b19afa..7884b952b720 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -403,7 +403,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev) .tx_streams = n_chains, .rx_streams = n_chains, }; - s8 tx_power; + s8 tx_power = hw->conf.power_level * 2; int i, ret; if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_40) { @@ -414,7 +414,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev) req.center_chan -= 2; } - tx_power = hw->conf.power_level * 2; + tx_power = mt76_get_sar_power(&dev->mphy, chandef->chan, tx_power); if (dev->mphy.antenna_mask == 3) tx_power -= 6; tx_power = min(tx_power, dev->tx_power_limit); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index 6fd6f067da49..b53528014fbc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -359,6 +359,9 @@ mt7615_queues_acq(struct seq_file *s, void *data) int acs = i / MT7615_MAX_WMM_SETS; u32 ctrl, val, qlen = 0; + if (wmm_idx == 3 && is_mt7663(&dev->mt76)) + continue; + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx)); ctrl = BIT(31) | BIT(15) | (acs << 8); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 47f23ac905a3..a753c7476d31 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -194,6 +194,7 @@ mt7615_check_offload_capability(struct mt7615_dev *dev) ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + wiphy->flags &= ~WIPHY_FLAG_4ADDR_STATION; wiphy->max_remain_on_channel_duration = 5000; wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 423f69015e3e..ec25e5a95d44 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -249,6 +249,82 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv) #endif } +/* The HW does not 
translate the mac header to 802.3 for mesh point */ +static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid; + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct ieee80211_hdr hdr; + struct ethhdr eth_hdr; + __le32 *rxd = (__le32 *)skb->data; + __le32 qos_ctrl, ht_ctrl; + + if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) != + MT_RXD1_NORMAL_U2M) + return -EINVAL; + + if (!(le32_to_cpu(rxd[0]) & MT_RXD0_NORMAL_GROUP_4)) + return -EINVAL; + + if (!msta || !msta->vif) + return -EINVAL; + + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + + /* store the info from RXD and ethhdr to avoid being overridden */ + memcpy(ð_hdr, skb->data + hdr_gap, sizeof(eth_hdr)); + hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]); + hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]); + qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]); + ht_ctrl = FIELD_GET(MT_RXD7_HT_CONTROL, rxd[7]); + + hdr.duration_id = 0; + ether_addr_copy(hdr.addr1, vif->addr); + ether_addr_copy(hdr.addr2, sta->addr); + switch (le16_to_cpu(hdr.frame_control) & + (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { + case 0: + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); + break; + case IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr.h_source); + break; + case IEEE80211_FCTL_TODS: + ether_addr_copy(hdr.addr3, eth_hdr.h_dest); + break; + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr.h_dest); + ether_addr_copy(hdr.addr4, eth_hdr.h_source); + break; + default: + break; + } + + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); + if (eth_hdr.h_proto == htons(ETH_P_AARP) || + eth_hdr.h_proto == htons(ETH_P_IPX)) + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); + else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN)) + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); + else + skb_pull(skb, 2); + + if (ieee80211_has_order(hdr.frame_control)) + memcpy(skb_push(skb, 2), &ht_ctrl, 2); + if (ieee80211_is_data_qos(hdr.frame_control)) + memcpy(skb_push(skb, 2), &qos_ctrl, 2); + if (ieee80211_has_a4(hdr.frame_control)) + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); + else + memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); + + status->flag &= ~(RX_FLAG_RADIOTAP_HE | RX_FLAG_RADIOTAP_HE_MU); + return 0; +} + static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -263,6 +339,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) u32 rxd2 = le32_to_cpu(rxd[2]); u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false; + u16 hdr_gap; int phy_idx; int i, idx; u8 chfreq, amsdu_info, qos_ctl = 0; @@ -286,9 +363,16 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) return -EINVAL; + hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS; + if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_CM)) + return -EINVAL; + + /* ICV error or CCMP/BIP/WPI MIC error */ + if (rxd2 & MT_RXD2_NORMAL_ICV_ERR) + status->flag |= RX_FLAG_ONLY_MONITOR; + unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M; idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2); - hdr_trans = 
rxd1 & MT_RXD1_NORMAL_HDR_TRANS; status->wcid = mt7615_rx_get_wcid(dev, idx, unicast); if (status->wcid) { @@ -503,16 +587,42 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) return -EINVAL; } - skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); - amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1); status->amsdu = !!amsdu_info; if (status->amsdu) { status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME; status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME; - if (!hdr_trans) { - memmove(skb->data + 2, skb->data, - ieee80211_get_hdrlen_from_skb(skb)); + } + + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; + if (hdr_trans && ieee80211_has_morefrags(fc)) { + if (mt7615_reverse_frag0_hdr_trans(skb, hdr_gap)) + return -EINVAL; + hdr_trans = false; + } else { + int pad_start = 0; + + skb_pull(skb, hdr_gap); + if (!hdr_trans && status->amsdu) { + pad_start = ieee80211_get_hdrlen_from_skb(skb); + } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { + /* + * When header translation failure is indicated, + * the hardware will insert an extra 2-byte field + * containing the data length after the protocol + * type field. + */ + pad_start = 12; + if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) + pad_start += 4; + + if (get_unaligned_be16(skb->data + pad_start) != + skb->len - pad_start - 2) + pad_start = 0; + } + + if (pad_start) { + memmove(skb->data + 2, skb->data, pad_start); skb_pull(skb, 2); } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index 46f283eb8d0f..e241c613091c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -86,6 +86,8 @@ enum rx_pkt_type { #define MT_RXD6_SEQ_CTRL GENMASK(15, 0) #define MT_RXD6_QOS_CTL GENMASK(31, 16) +#define MT_RXD7_HT_CONTROL GENMASK(31, 0) + #define MT_RXV1_ACID_DET_H BIT(31) #define MT_RXV1_ACID_DET_L BIT(30) #define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 890d9b07e156..82d625a16a62 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -73,7 +73,7 @@ static int mt7615_start(struct ieee80211_hw *hw) goto out; } - ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); if (ret) goto out; @@ -141,9 +141,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask) if (i) return i - 1; - if (type != NL80211_IFTYPE_STATION) - break; - /* next, try to find a free repeater entry for the sta */ i = get_free_idx(mask >> REPEATER_BSSID_START, 0, REPEATER_BSSID_MAX - REPEATER_BSSID_START); @@ -211,11 +208,9 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, mvif->mt76.omac_idx = idx; mvif->mt76.band_idx = ext_phy; - if (mt7615_ext_phy(dev)) - mvif->mt76.wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) + - mvif->mt76.idx % (MT7615_MAX_WMM_SETS / 2); - else - mvif->mt76.wmm_idx = mvif->mt76.idx % MT7615_MAX_WMM_SETS; + mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP; + if (ext_phy) + mvif->mt76.wmm_idx += 2; dev->mt76.vif_mask |= BIT(mvif->mt76.idx); dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); @@ -331,7 +326,7 @@ int mt7615_set_channel(struct mt7615_phy *phy) goto out; } - ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH); + ret = mt7615_mcu_set_chan_info(phy, 
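
In the mt7615 fill_rx hunk above, a 2-byte field that the hardware left inside the header (the A-MSDU pad, or the length field inserted on a failed header translation) is removed by sliding the first pad_start bytes forward with memmove() and then pulling two bytes off the front, so the much larger payload is never copied. A standalone sketch of that strip, with a toy 4-byte header standing in for the real one:

#include <stdio.h>
#include <string.h>

/* Shift the header over the 2-byte pad that follows it, then advance the
 * data pointer: plain-C stand-in for memmove() + skb_pull(skb, 2). */
static unsigned char *strip_pad2(unsigned char *data, size_t pad_start)
{
	memmove(data + 2, data, pad_start); /* header slides over the pad */
	return data + 2;                    /* new frame start, 2 bytes shorter */
}

int main(void)
{
	/* 4-byte "header", 2-byte pad, 4-byte payload */
	unsigned char buf[] = { 'H', 'E', 'A', 'D', 0, 0, 'D', 'A', 'T', 'A' };
	unsigned char *frame = strip_pad2(buf, 4);

	fwrite(frame, 1, sizeof(buf) - 2, stdout); /* prints HEADDATA */
	putchar('\n');
	return 0;
}
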
MCU_EXT_CMD(CHANNEL_SWITCH)); if (ret) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 25f9cbe2cd61..759dcf0e6783 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -87,7 +87,7 @@ struct mt7663_fw_buf { void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, int cmd, int *wait_seq) { - int txd_len, mcu_cmd = cmd & MCU_CMD_MASK; + int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); struct mt7615_uni_txd *uni_txd; struct mt7615_mcu_txd *mcu_txd; u8 seq, q_idx, pkt_fmt; @@ -103,10 +103,10 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, if (wait_seq) *wait_seq = seq; - txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd); + txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd); txd = (__le32 *)skb_push(skb, txd_len); - if (cmd != MCU_CMD_FW_SCATTER) { + if (cmd != MCU_CMD(FW_SCATTER)) { q_idx = MT_TX_MCU_PORT_RX_Q0; pkt_fmt = MT_TX_TYPE_CMD; } else { @@ -124,7 +124,7 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt); txd[1] = cpu_to_le32(val); - if (cmd & MCU_UNI_PREFIX) { + if (cmd & __MCU_CMD_FIELD_UNI) { uni_txd = (struct mt7615_uni_txd *)txd; uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd)); uni_txd->option = MCU_CMD_UNI_EXT_ACK; @@ -142,28 +142,17 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, mcu_txd->s2d_index = MCU_S2D_H2N; mcu_txd->pkt_type = MCU_PKT_ID; mcu_txd->seq = seq; + mcu_txd->cid = mcu_cmd; + mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd); - switch (cmd & ~MCU_CMD_MASK) { - case MCU_FW_PREFIX: - mcu_txd->set_query = MCU_Q_NA; - mcu_txd->cid = mcu_cmd; - break; - case MCU_CE_PREFIX: - if (cmd & MCU_QUERY_MASK) - mcu_txd->set_query = MCU_Q_QUERY; - else - mcu_txd->set_query = MCU_Q_SET; - mcu_txd->cid = mcu_cmd; - break; - default: - mcu_txd->cid = MCU_CMD_EXT_CID; - if (cmd & MCU_QUERY_PREFIX) + if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) { + if (cmd & __MCU_CMD_FIELD_QUERY) mcu_txd->set_query = MCU_Q_QUERY; else mcu_txd->set_query = MCU_Q_SET; - mcu_txd->ext_cid = mcu_cmd; - mcu_txd->ext_cid_ack = 1; - break; + mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid; + } else { + mcu_txd->set_query = MCU_Q_NA; } } EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg); @@ -184,42 +173,32 @@ int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd, if (seq != rxd->seq) return -EAGAIN; - switch (cmd) { - case MCU_CMD_PATCH_SEM_CONTROL: + if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) { skb_pull(skb, sizeof(*rxd) - 4); ret = *skb->data; - break; - case MCU_EXT_CMD_GET_TEMP: + } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) { skb_pull(skb, sizeof(*rxd)); ret = le32_to_cpu(*(__le32 *)skb->data); - break; - case MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX: + } else if (cmd == MCU_EXT_QUERY(RF_REG_ACCESS)) { skb_pull(skb, sizeof(*rxd)); ret = le32_to_cpu(*(__le32 *)&skb->data[8]); - break; - case MCU_UNI_CMD_DEV_INFO_UPDATE: - case MCU_UNI_CMD_BSS_INFO_UPDATE: - case MCU_UNI_CMD_STA_REC_UPDATE: - case MCU_UNI_CMD_HIF_CTRL: - case MCU_UNI_CMD_OFFLOAD: - case MCU_UNI_CMD_SUSPEND: { + } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) || + cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) || + cmd == MCU_UNI_CMD(STA_REC_UPDATE) || + cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(OFFLOAD) || + cmd == MCU_UNI_CMD(SUSPEND)) { struct mt7615_mcu_uni_event *event; skb_pull(skb, sizeof(*rxd)); event = 
(struct mt7615_mcu_uni_event *)skb->data; ret = le32_to_cpu(event->status); - break; - } - case MCU_CMD_REG_READ: { + } else if (cmd == MCU_CE_QUERY(REG_READ)) { struct mt7615_mcu_reg_event *event; skb_pull(skb, sizeof(*rxd)); event = (struct mt7615_mcu_reg_event *)skb->data; ret = (int)le32_to_cpu(event->val); - break; - } - default: - break; } return ret; @@ -253,8 +232,7 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg) .address = cpu_to_le32(reg), }; - return mt76_mcu_send_msg(&dev->mt76, - MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS), &req, sizeof(req), true); } @@ -270,8 +248,8 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val) .data = cpu_to_le32(val), }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req, - sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS), + &req, sizeof(req), false); } void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) @@ -658,8 +636,8 @@ mt7615_mcu_muar_config(struct mt7615_dev *dev, struct ieee80211_vif *vif, if (enable) ether_addr_copy(req.addr, addr); - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MUAR_UPDATE, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE), + &req, sizeof(req), true); } static int @@ -702,7 +680,7 @@ mt7615_mcu_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif, return mt7615_mcu_muar_config(dev, vif, false, enable); memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN); - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE), &data, sizeof(data), true); } @@ -771,7 +749,7 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev, dev_kfree_skb(skb); out: - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD, &req, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(BCN_OFFLOAD), &req, sizeof(req), true); } @@ -802,8 +780,8 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state) .band_idx = band, }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL), + &req, sizeof(req), true); } static int @@ -843,7 +821,7 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, bss = (struct bss_info_basic *)tlv; bss->network_type = cpu_to_le32(type); - bss->bmc_tx_wlan_idx = wlan_idx; + bss->bmc_wcid_lo = wlan_idx; bss->wmm_idx = mvif->mt76.wmm_idx; bss->active = enable; @@ -944,7 +922,7 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, mt7615_mcu_bss_ext_tlv(skb, mvif); return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_BSS_INFO_UPDATE, true); + MCU_EXT_CMD(BSS_INFO_UPDATE), true); } static int @@ -966,8 +944,8 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev, mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, true, NULL, wtbl_hdr); - err = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE, - true); + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(WTBL_UPDATE), true); if (err < 0) return err; @@ -979,7 +957,7 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev, mt76_connac_mcu_sta_ba_tlv(skb, params, enable, true); return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + MCU_EXT_CMD(STA_REC_UPDATE), true); } static int @@ -1001,7 +979,7 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev, mt76_connac_mcu_sta_ba_tlv(skb, params, 
enable, false); err = mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + MCU_EXT_CMD(STA_REC_UPDATE), true); if (err < 0 || !enable) return err; @@ -1014,8 +992,8 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev, mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, false, NULL, wtbl_hdr); - return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE, - true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(WTBL_UPDATE), true); } static int @@ -1057,7 +1035,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, NULL, wtbl_hdr); } - cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE; + cmd = enable ? MCU_EXT_CMD(WTBL_UPDATE) : MCU_EXT_CMD(STA_REC_UPDATE); skb = enable ? wskb : sskb; err = mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); @@ -1068,7 +1046,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, return err; } - cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE; + cmd = enable ? MCU_EXT_CMD(STA_REC_UPDATE) : MCU_EXT_CMD(WTBL_UPDATE); skb = enable ? sskb : wskb; return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); @@ -1090,8 +1068,8 @@ mt7615_mcu_wtbl_update_hdr_trans(struct mt7615_dev *dev, mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL, wtbl_hdr); - return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE, - true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(WTBL_UPDATE), true); } static const struct mt7615_mcu_ops wtbl_update_ops = { @@ -1136,7 +1114,7 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev, sta_wtbl, wtbl_hdr); return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + MCU_EXT_CMD(STA_REC_UPDATE), true); } static int @@ -1179,7 +1157,7 @@ mt7615_mcu_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable) { return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable, - MCU_EXT_CMD_STA_REC_UPDATE, false); + MCU_EXT_CMD(STA_REC_UPDATE), false); } static int @@ -1191,7 +1169,7 @@ mt7615_mcu_sta_update_hdr_trans(struct mt7615_dev *dev, return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid, - MCU_EXT_CMD_STA_REC_UPDATE); + MCU_EXT_CMD(STA_REC_UPDATE)); } static const struct mt7615_mcu_ops sta_update_ops = { @@ -1285,7 +1263,7 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev, dev_kfree_skb(skb); out: - return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE, + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), &req, sizeof(req), true); } @@ -1314,7 +1292,7 @@ mt7615_mcu_uni_add_sta(struct mt7615_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable) { return __mt7615_mcu_add_sta(phy->mt76, vif, sta, enable, - MCU_UNI_CMD_STA_REC_UPDATE, true); + MCU_UNI_CMD(STA_REC_UPDATE), true); } static int @@ -1348,7 +1326,7 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev, mt76_connac_mcu_sta_ba_tlv(skb, params, enable, false); err = mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_UNI_CMD_STA_REC_UPDATE, true); + MCU_UNI_CMD(STA_REC_UPDATE), true); if (err < 0 || !enable) return err; @@ -1369,7 +1347,7 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev, sta_wtbl, wtbl_hdr); return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_UNI_CMD_STA_REC_UPDATE, true); + MCU_UNI_CMD(STA_REC_UPDATE), true); } static int @@ -1381,7 +1359,7 @@ mt7615_mcu_sta_uni_update_hdr_trans(struct mt7615_dev *dev, return mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid, - 
MCU_UNI_CMD_STA_REC_UPDATE); + MCU_UNI_CMD(STA_REC_UPDATE)); } static const struct mt7615_mcu_ops uni_update_ops = { @@ -1399,7 +1377,7 @@ static const struct mt7615_mcu_ops uni_update_ops = { int mt7615_mcu_restart(struct mt76_dev *dev) { - return mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL, 0, true); + return mt76_mcu_send_msg(dev, MCU_CMD(RESTART_DL_REQ), NULL, 0, true); } EXPORT_SYMBOL_GPL(mt7615_mcu_restart); @@ -1445,7 +1423,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name) goto out; } - ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, + ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), fw->data + sizeof(*hdr), len); if (ret) { dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); @@ -1508,7 +1486,7 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev, return err; } - err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, + err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), data + offset, len); if (err) { dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); @@ -1644,7 +1622,7 @@ static int mt7615_load_firmware(struct mt7615_dev *dev) if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE, FIELD_PREP(MT_TOP_MISC2_FW_STATE, - FW_STATE_CR4_RDY), 500)) { + FW_STATE_RDY), 500)) { dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); return -EIO; } @@ -1694,8 +1672,8 @@ int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl) .ctrl_val = ctrl }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST, &data, - sizeof(data), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), + &data, sizeof(data), true); } static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev) @@ -1707,7 +1685,7 @@ static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev) .cache_enable = true }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CAL_CACHE, &data, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(CAL_CACHE), &data, sizeof(data), false); } @@ -1756,7 +1734,7 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name) goto out; } - ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, + ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), fw->data + offset, len); if (ret) { dev_err(dev->mt76.dev, "Failed to send firmware\n"); @@ -1977,7 +1955,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev) skb_put_data(skb, eep + offset, eep_len); return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_EFUSE_BUFFER_MODE, true); + MCU_EXT_CMD(EFUSE_BUFFER_MODE), true); } int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, @@ -2013,8 +1991,8 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, if (params->cw_max) req.cw_max = cpu_to_le16(fls(params->cw_max)); - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE), + &req, sizeof(req), true); } int mt7615_mcu_set_dbdc(struct mt7615_dev *dev) @@ -2072,7 +2050,7 @@ int mt7615_mcu_set_dbdc(struct mt7615_dev *dev) ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1); out: - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL, &req, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DBDC_CTRL), &req, sizeof(req), true); } @@ -2082,8 +2060,8 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev) .operation = WTBL_RESET_ALL, }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, 
MCU_EXT_CMD(WTBL_UPDATE), + &req, sizeof(req), true); } int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev, @@ -2103,8 +2081,8 @@ int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev, .val = val, }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL), + &req, sizeof(req), true); } int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val) @@ -2117,8 +2095,8 @@ int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val) .min_lpn = cpu_to_le16(val), }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH), + &req, sizeof(req), true); } int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, @@ -2146,8 +2124,8 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, #undef __req_field }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH), + &req, sizeof(req), true); } int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index, @@ -2193,8 +2171,8 @@ int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index, #undef __req_field_u32 }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RADAR_TH), + &req, sizeof(req), true); } int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev) @@ -2225,7 +2203,7 @@ int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev) req.pattern[i].start_time = cpu_to_le32(ts); } - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_PATTERN, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_PATTERN), &req, sizeof(req), false); } @@ -2394,8 +2372,8 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev) u8 rsv[3]; } req = {}; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), + &req, sizeof(req), true); } int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, @@ -2415,8 +2393,8 @@ int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, .value = cpu_to_le32(val), }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req, - sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), + &req, sizeof(req), false); } int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable) @@ -2434,8 +2412,8 @@ int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable) }; return mt76_mcu_send_msg(&dev->mt76, - MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req, - sizeof(req), true); + MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), + &req, sizeof(req), true); } static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur) @@ -2574,7 +2552,7 @@ again: out: req.center_freq = cpu_to_le16(center_freq); - ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RXDCOC_CAL, &req, + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RXDCOC_CAL), &req, sizeof(req), true); if ((chandef->width == NL80211_CHAN_WIDTH_80P80 || @@ -2695,8 +2673,8 @@ again: out: req.center_freq = cpu_to_le16(center_freq); - ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXDPD_CAL, &req, - sizeof(req), true); + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXDPD_CAL), + &req, sizeof(req), true); if ((chandef->width == NL80211_CHAN_WIDTH_80P80 || chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) { @@ -2724,7 +2702,7 @@ int 
mt7615_mcu_set_rx_hdr_trans_blacklist(struct mt7615_dev *dev) .etype = cpu_to_le16(ETH_P_PAE), }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS), &req, sizeof(req), false); } @@ -2759,13 +2737,13 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, if (vif->type != NL80211_IFTYPE_STATION) return 0; - err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, &req_hdr, - sizeof(req_hdr), false); + err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT), + &req_hdr, sizeof(req_hdr), false); if (err < 0 || !enable) return err; - return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req, - sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED), + &req, sizeof(req), false); } int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, @@ -2784,6 +2762,6 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, phy->roc_grant = false; - return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req, - sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_ROC), + &req, sizeof(req), false); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index 98c383e400a1..47863ae9f30b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -76,35 +76,6 @@ struct mt7615_uni_txd { u8 reserved2[4]; } __packed __aligned(4); -/* event table */ -enum { - MCU_EVENT_TARGET_ADDRESS_LEN = 0x01, - MCU_EVENT_FW_START = 0x01, - MCU_EVENT_GENERIC = 0x01, - MCU_EVENT_ACCESS_REG = 0x02, - MCU_EVENT_MT_PATCH_SEM = 0x04, - MCU_EVENT_REG_ACCESS = 0x05, - MCU_EVENT_SCAN_DONE = 0x0d, - MCU_EVENT_ROC = 0x10, - MCU_EVENT_BSS_ABSENCE = 0x11, - MCU_EVENT_BSS_BEACON_LOSS = 0x13, - MCU_EVENT_CH_PRIVILEGE = 0x18, - MCU_EVENT_SCHED_SCAN_DONE = 0x23, - MCU_EVENT_EXT = 0xed, - MCU_EVENT_RESTART_DL = 0xef, - MCU_EVENT_COREDUMP = 0xf0, -}; - -/* ext event table */ -enum { - MCU_EXT_EVENT_PS_SYNC = 0x5, - MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13, - MCU_EXT_EVENT_THERMAL_PROTECT = 0x22, - MCU_EXT_EVENT_ASSERT_DUMP = 0x23, - MCU_EXT_EVENT_RDD_REPORT = 0x3a, - MCU_EXT_EVENT_CSA_NOTIFY = 0x4f, -}; - enum { MT_SKU_CCK_1_2 = 0, MT_SKU_CCK_55_11, @@ -234,20 +205,6 @@ struct mt7615_mcu_rdd_report { #define MCU_PKT_ID 0xa0 enum { - MCU_Q_QUERY, - MCU_Q_SET, - MCU_Q_RESERVED, - MCU_Q_NA -}; - -enum { - MCU_S2D_H2N, - MCU_S2D_C2N, - MCU_S2D_H2C, - MCU_S2D_H2CN -}; - -enum { MCU_ATE_SET_FREQ_OFFSET = 0xa, MCU_ATE_SET_TX_POWER_CONTROL = 0x15, }; @@ -281,21 +238,6 @@ struct mt7615_roc_tlv { } __packed; enum { - PATCH_NOT_DL_SEM_FAIL = 0x0, - PATCH_IS_DL = 0x1, - PATCH_NOT_DL_SEM_SUCCESS = 0x2, - PATCH_REL_SEM_SUCCESS = 0x3 -}; - -enum { - FW_STATE_INITIAL = 0, - FW_STATE_FW_DOWNLOAD = 1, - FW_STATE_NORMAL_OPERATION = 2, - FW_STATE_NORMAL_TRX = 3, - FW_STATE_CR4_RDY = 7 -}; - -enum { FW_STATE_PWR_ON = 1, FW_STATE_N9_RDY = 2, }; @@ -312,73 +254,4 @@ enum { __DBDC_TYPE_MAX, }; -struct bss_info_omac { - __le16 tag; - __le16 len; - u8 hw_bss_idx; - u8 omac_idx; - u8 band_idx; - u8 rsv0; - __le32 conn_type; - u32 rsv1; -} __packed; - -struct bss_info_basic { - __le16 tag; - __le16 len; - __le32 network_type; - u8 active; - u8 rsv0; - __le16 bcn_interval; - u8 bssid[ETH_ALEN]; - u8 wmm_idx; - u8 dtim_period; - u8 bmc_tx_wlan_idx; - u8 cipher; /* not used */ - u8 phymode; /* not used */ - u8 rsv1[5]; -} __packed; - -struct bss_info_rf_ch { 
- __le16 tag; - __le16 len; - u8 pri_ch; - u8 central_ch0; - u8 central_ch1; - u8 bw; -} __packed; - -struct bss_info_ext_bss { - __le16 tag; - __le16 len; - __le32 mbss_tsf_offset; /* in unit of us */ - u8 rsv[8]; -} __packed; - -enum { - BSS_INFO_OMAC, - BSS_INFO_BASIC, - BSS_INFO_RF_CH, /* optional, for BT/LTE coex */ - BSS_INFO_PM, /* sta only */ - BSS_INFO_UAPSD, /* sta only */ - BSS_INFO_ROAM_DETECTION, /* obsoleted */ - BSS_INFO_LQ_RM, /* obsoleted */ - BSS_INFO_EXT_BSS, - BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */ - BSS_INFO_SYNC_MODE, /* obsoleted */ - BSS_INFO_RA, - BSS_INFO_MAX_NUM -}; - -enum { - CH_SWITCH_NORMAL = 0, - CH_SWITCH_SCAN = 3, - CH_SWITCH_MCC = 4, - CH_SWITCH_DFS = 5, - CH_SWITCH_BACKGROUND_SCAN_START = 6, - CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7, - CH_SWITCH_BACKGROUND_SCAN_STOP = 8, - CH_SWITCH_SCAN_BYPASS_DPD = 9 -}; - #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index 71719c787511..33f72f3657d0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -135,6 +135,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t) if (is_mt7663(&dev->mt76)) { mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS); mcu_int &= MT7663_MCU_CMD_ERROR_MASK; + mt76_wr(dev, MT_MCU2HOST_INT_STATUS, mcu_int); } else { mcu_int = mt76_rr(dev, MT_MCU_CMD); mcu_int &= MT_MCU_CMD_ERROR_MASK; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c index a2465b49ecd0..87b4aa52ee0f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c @@ -28,8 +28,6 @@ static void mt7615_pci_init_work(struct work_struct *work) return; mt7615_init_work(dev); - if (dev->dbdc_support) - mt7615_register_ext_phy(dev); } static int mt7615_init_hardware(struct mt7615_dev *dev) @@ -160,6 +158,12 @@ int mt7615_register_device(struct mt7615_dev *dev) mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); + if (dev->dbdc_support) { + ret = mt7615_register_ext_phy(dev); + if (ret) + return ret; + } + return mt7615_init_debugfs(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 5ee52cd70a4b..d1806f198aed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (!wcid) wcid = &dev->mt76.global_wcid; - pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); - if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) { struct mt7615_phy *phy = &dev->phy; @@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (id < 0) return id; + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta, pid, key, false); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c index 59d99264f5e5..a3d1cfa729ed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c @@ -91,7 +91,7 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy) } return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_SET_TX_POWER_CTRL, false); + MCU_EXT_CMD(SET_TX_POWER_CTRL), false); } 
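
The long run of conversions above, from MCU_EXT_CMD_* constants to MCU_CMD()/MCU_EXT_CMD()/MCU_UNI_CMD()/MCU_CE_CMD() macro calls, works because the command ID, extended ID, and attribute bits (unified format, CE, query) are now packed into a single integer and decoded with FIELD_GET() in mt7615_mcu_fill_msg(). The real field layout lives in mt76_connac_mcu.h, which is not part of this diff; the sketch below invents bit positions purely to show the encode/decode shape:

#include <stdint.h>
#include <stdio.h>

#define CMD_FIELD_ID      0x000000ffu  /* primary command ID */
#define CMD_FIELD_EXT_ID  0x0000ff00u  /* extended command ID */
#define CMD_FIELD_QUERY   0x00010000u  /* query (vs. set) */
#define CMD_FIELD_UNI     0x00020000u  /* unified command format */

#define MCU_CMD(id)        ((uint32_t)(id))
#define MCU_EXT_CMD(ext)   (MCU_CMD(0xed) | ((uint32_t)(ext) << 8))
#define MCU_EXT_QUERY(ext) (MCU_EXT_CMD(ext) | CMD_FIELD_QUERY)

int main(void)
{
	uint32_t cmd = MCU_EXT_QUERY(0x05); /* e.g. an RF register read */

	/* the TX-descriptor builder decodes fields instead of
	 * comparing prefix constants */
	printf("cid=%#x ext_cid=%#x query=%d uni=%d\n",
	       (unsigned)(cmd & CMD_FIELD_ID),
	       (unsigned)((cmd & CMD_FIELD_EXT_ID) >> 8),
	       !!(cmd & CMD_FIELD_QUERY), !!(cmd & CMD_FIELD_UNI));
	return 0;
}

With this shape, adding a new command attribute costs one new bit rather than another prefix constant and another switch arm.
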
static void @@ -185,36 +185,35 @@ mt7615_tm_set_tx_antenna(struct mt7615_phy *phy, bool en) for (i = 0; i < 4; i++) { mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i), MT_WF_PHY_RFINTF3_0_ANT, - (td->tx_antenna_mask & BIT(i)) ? 0 : 0xa); - + (mask & BIT(i)) ? 0 : 0xa); } /* 2.4 GHz band */ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0), - (td->tx_antenna_mask & BIT(0)) ? 0x8 : 0x1b); + (mask & BIT(0)) ? 0x8 : 0x1b); mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2), - (td->tx_antenna_mask & BIT(1)) ? 0xe : 0x1b); + (mask & BIT(1)) ? 0xe : 0x1b); mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0), - (td->tx_antenna_mask & BIT(2)) ? 0x0 : 0xf); + (mask & BIT(2)) ? 0x0 : 0xf); mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2), - (td->tx_antenna_mask & BIT(3)) ? 0x6 : 0xf); + (mask & BIT(3)) ? 0x6 : 0xf); /* 5 GHz band */ mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1), - (td->tx_antenna_mask & BIT(0)) ? 0xd : 0x1b); + (mask & BIT(0)) ? 0xd : 0x1b); mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3), - (td->tx_antenna_mask & BIT(1)) ? 0x13 : 0x1b); + (mask & BIT(1)) ? 0x13 : 0x1b); mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1), - (td->tx_antenna_mask & BIT(2)) ? 0x5 : 0xf); + (mask & BIT(2)) ? 0x5 : 0xf); mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3), - (td->tx_antenna_mask & BIT(3)) ? 0xb : 0xf); + (mask & BIT(3)) ? 0xb : 0xf); for (i = 0; i < 4; i++) { u32 val; val = mt7615_rf_rr(dev, i, 0x48); val &= ~(0x3ff << 20); - if (td->tx_antenna_mask & BIT(i)) + if (mask & BIT(i)) val |= 3 << 20; else val |= (2 << 28) | (2 << 26) | (8 << 20); @@ -229,7 +228,7 @@ mt7615_tm_set_tx_frames(struct mt7615_phy *phy, bool en) struct ieee80211_tx_info *info; struct sk_buff *skb = phy->mt76->test.tx_skb; - mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); mt7615_tm_set_tx_antenna(phy, en); mt7615_tm_set_rx_enable(dev, !en); if (!en || !skb) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c index 028ff432d811..0ebb4c3c336a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c @@ -21,7 +21,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, int ret, ep, len, pad; mt7615_mcu_fill_msg(dev, skb, cmd, seq); - if (cmd != MCU_CMD_FW_SCATTER) + if (cmd != MCU_CMD(FW_SCATTER)) ep = MT_EP_OUT_INBAND_CMD; else ep = MT_EP_OUT_AC_BE; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index bd2939ebcbf4..5a6d7829c6e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map); static void mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; - __le32 *txwi; - int pid; - - if (!wcid) - wcid = &dev->mt76.global_wcid; - - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); - txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); memset(txwi, 0, MT_USB_TXD_SIZE); 
mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); skb_push(skb, MT_USB_TXD_SIZE); @@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); struct sk_buff *skb = tx_info->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; struct mt7615_sta *msta; - int pad; + int pad, err, pktid; msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL; + if (!wcid) + wcid = &dev->mt76.global_wcid; + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta && !msta->rate_probe) { /* request to configure sampling rate */ @@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, spin_unlock_bh(&dev->mt76.lock); } - mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb); + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb); if (mt76_is_usb(mdev)) { u32 len = skb->len; @@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, pad = round_up(skb->len, 4) - skb->len; } - return mt76_skb_adjust_pad(skb, pad); + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; } EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c index af43bcb54578..306e9eaea917 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c @@ -7,9 +7,6 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm) { struct mt76_dev *dev = phy->dev; - if (!pm->enable) - return 0; - if (mt76_is_usb(dev)) return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 26b4b875dcc0..f79e3d5084f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -13,8 +13,8 @@ int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option) .addr = cpu_to_le32(addr), }; - return mt76_mcu_send_msg(dev, MCU_CMD_FW_START_REQ, &req, sizeof(req), - true); + return mt76_mcu_send_msg(dev, MCU_CMD(FW_START_REQ), &req, + sizeof(req), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_firmware); @@ -27,8 +27,8 @@ int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get) .op = cpu_to_le32(op), }; - return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_SEM_CONTROL, &req, - sizeof(req), true); + return mt76_mcu_send_msg(dev, MCU_CMD(PATCH_SEM_CONTROL), + &req, sizeof(req), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_patch_sem_ctrl); @@ -41,8 +41,8 @@ int mt76_connac_mcu_start_patch(struct mt76_dev *dev) .check_crc = 0, }; - return mt76_mcu_send_msg(dev, MCU_CMD_PATCH_FINISH_REQ, &req, - sizeof(req), true); + return mt76_mcu_send_msg(dev, MCU_CMD(PATCH_FINISH_REQ), + &req, sizeof(req), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_start_patch); @@ -64,9 +64,9 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len, if (is_mt7921(dev) && (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000)) - cmd = MCU_CMD_PATCH_START_REQ; + cmd = MCU_CMD(PATCH_START_REQ); else - cmd = MCU_CMD_TARGET_ADDRESS_LEN_REQ; + cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ); return mt76_mcu_send_msg(dev, cmd, 
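
mt7663_usb_sdio_tx_prepare_skb() above now allocates the status-tracking packet ID itself and, when mt76_skb_adjust_pad() fails afterwards, releases it with idr_remove() so the tracker slot does not leak. A toy stand-in for that reserve-then-roll-back flow (the slot table below imitates, but is not, the kernel IDR):

#include <stdio.h>

#define NSLOTS 8
static void *slots[NSLOTS];

static int pktid_alloc(void *skb)
{
	for (int i = 0; i < NSLOTS; i++)
		if (!slots[i]) { slots[i] = skb; return i; }
	return -1;
}

static void pktid_remove(int id)
{
	if (id >= 0)
		slots[id] = NULL;
}

static int adjust_pad(void *skb) { (void)skb; return -1; /* pretend it failed */ }

static int tx_prepare(void *skb)
{
	int pid = pktid_alloc(skb);  /* reserve the tracking slot early */
	int err = adjust_pad(skb);

	if (err)
		pktid_remove(pid);   /* roll back on any later failure */
	return err;
}

int main(void)
{
	char skb[32] = "frame";

	tx_prepare(skb);
	printf("slot 0 after failed tx: %p\n", slots[0]); /* NULL: no leak */
	return 0;
}
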
&req, sizeof(req), true); } @@ -160,7 +160,8 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy) memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); - return mt76_mcu_skb_send_msg(dev, skb, MCU_CMD_SET_CHAN_DOMAIN, false); + return mt76_mcu_skb_send_msg(dev, skb, MCU_CE_CMD(SET_CHAN_DOMAIN), + false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_channel_domain); @@ -176,7 +177,7 @@ int mt76_connac_mcu_set_mac_enable(struct mt76_dev *dev, int band, bool enable, .band = band, }; - return mt76_mcu_send_msg(dev, MCU_EXT_CMD_MAC_INIT_CTRL, &req_mac, + return mt76_mcu_send_msg(dev, MCU_EXT_CMD(MAC_INIT_CTRL), &req_mac, sizeof(req_mac), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_mac_enable); @@ -198,8 +199,8 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif) if (vif->type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - return mt76_mcu_send_msg(dev, MCU_CMD_SET_PS_PROFILE, &req, - sizeof(req), false); + return mt76_mcu_send_msg(dev, MCU_CE_CMD(SET_PS_PROFILE), + &req, sizeof(req), false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_vif_ps); @@ -218,7 +219,7 @@ int mt76_connac_mcu_set_rts_thresh(struct mt76_dev *dev, u32 val, u8 band) .pkt_thresh = cpu_to_le32(0x2), }; - return mt76_mcu_send_msg(dev, MCU_EXT_CMD_PROTECT_CTRL, &req, + return mt76_mcu_send_msg(dev, MCU_EXT_CMD(PROTECT_CTRL), &req, sizeof(req), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rts_thresh); @@ -257,11 +258,8 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len, ntlv = le16_to_cpu(ntlv_hdr->tlv_num); ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1); - if (sta_hdr) { - u16 size = le16_to_cpu(sta_hdr->len); - - sta_hdr->len = cpu_to_le16(size + len); - } + if (sta_hdr) + le16_add_cpu(&sta_hdr->len, len); return ptlv; } @@ -1071,7 +1069,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN); - cmd = enable ? MCU_UNI_CMD_DEV_INFO_UPDATE : MCU_UNI_CMD_BSS_INFO_UPDATE; + cmd = enable ? MCU_UNI_CMD(DEV_INFO_UPDATE) : MCU_UNI_CMD(BSS_INFO_UPDATE); data = enable ? (void *)&dev_req : (void *)&basic_req; len = enable ? sizeof(dev_req) : sizeof(basic_req); @@ -1079,7 +1077,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, if (err < 0) return err; - cmd = enable ? MCU_UNI_CMD_BSS_INFO_UPDATE : MCU_UNI_CMD_DEV_INFO_UPDATE; + cmd = enable ? MCU_UNI_CMD(BSS_INFO_UPDATE) : MCU_UNI_CMD(DEV_INFO_UPDATE); data = enable ? (void *)&basic_req : (void *)&dev_req; len = enable ? 
sizeof(basic_req) : sizeof(dev_req); @@ -1131,7 +1129,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl, wtbl_hdr); - ret = mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE, true); + ret = mt76_mcu_skb_send_msg(dev, skb, + MCU_UNI_CMD(STA_REC_UPDATE), true); if (ret) return ret; @@ -1141,8 +1140,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx); - return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_STA_REC_UPDATE, - true); + return mt76_mcu_skb_send_msg(dev, skb, + MCU_UNI_CMD(STA_REC_UPDATE), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba); @@ -1179,7 +1178,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, if (ht_cap->ht_supported) mode |= PHY_MODE_GN; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_MODE_AX_24G; } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) { mode |= PHY_MODE_A; @@ -1190,12 +1189,8 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, if (vht_cap->vht_supported) mode |= PHY_MODE_AC; - if (he_cap->has_he) { - if (band == NL80211_BAND_6GHZ) - mode |= PHY_MODE_AX_6G; - else - mode |= PHY_MODE_AX_5G; - } + if (he_cap && he_cap->has_he && band == NL80211_BAND_5GHZ) + mode |= PHY_MODE_AX_5G; } return mode; @@ -1318,7 +1313,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx; basic_req.basic.hw_bss_idx = idx; if (band == NL80211_BAND_6GHZ) - basic_req.basic.phymode_ext = BIT(0); + basic_req.basic.phymode_ext = PHY_MODE_AX_6G; basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, NULL); basic_req.basic.nonht_basic_phy = cpu_to_le16(basic_phy); @@ -1352,7 +1347,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, basic_req.basic.sta_idx = cpu_to_le16(wcid->idx); basic_req.basic.conn_state = !enable; - err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &basic_req, + err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &basic_req, sizeof(basic_req), true); if (err < 0) return err; @@ -1390,7 +1385,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, mt76_connac_mcu_uni_bss_he_tlv(phy, vif, (struct tlv *)&he_req.he); - err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, + err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &he_req, sizeof(he_req), true); if (err < 0) return err; @@ -1428,7 +1423,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy, else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) rlm_req.rlm.sco = 3; /* SCB */ - return mt76_mcu_send_msg(mdev, MCU_UNI_CMD_BSS_INFO_UPDATE, &rlm_req, + return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req, sizeof(rlm_req), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss); @@ -1522,7 +1517,8 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, req->scan_func |= SCAN_FUNC_RANDOM_MAC; } - err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_START_HW_SCAN, false); + err = mt76_mcu_skb_send_msg(mdev, skb, MCU_CE_CMD(START_HW_SCAN), + false); if (err < 0) clear_bit(MT76_HW_SCANNING, &phy->state); @@ -1550,8 +1546,8 @@ int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy, ieee80211_scan_completed(phy->hw, &info); } - return mt76_mcu_send_msg(phy->dev, MCU_CMD_CANCEL_HW_SCAN, &req, - sizeof(req), false); + return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(CANCEL_HW_SCAN), + &req, sizeof(req), 
false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_cancel_hw_scan); @@ -1637,7 +1633,8 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy, memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len); } - return mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_SCHED_SCAN_REQ, false); + return mt76_mcu_skb_send_msg(mdev, skb, MCU_CE_CMD(SCHED_SCAN_REQ), + false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_req); @@ -1657,8 +1654,8 @@ int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy, else clear_bit(MT76_HW_SCHED_SCANNING, &phy->state); - return mt76_mcu_send_msg(phy->dev, MCU_CMD_SCHED_SCAN_ENABLE, &req, - sizeof(req), false); + return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(SCHED_SCAN_ENABLE), + &req, sizeof(req), false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable); @@ -1670,8 +1667,8 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev) memcpy(req.data, "assert", 7); - return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req), - false); + return mt76_mcu_send_msg(dev, MCU_CE_CMD(CHIP_CONFIG), + &req, sizeof(req), false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config); @@ -1683,8 +1680,8 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable) snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable); - return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req), - false); + return mt76_mcu_send_msg(dev, MCU_CE_CMD(CHIP_CONFIG), + &req, sizeof(req), false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep); @@ -1786,8 +1783,8 @@ int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy) struct sk_buff *skb; int ret, i; - ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CMD_GET_NIC_CAPAB, NULL, - 0, true, &skb); + ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB), + NULL, 0, true, &skb); if (ret) return ret; @@ -1885,30 +1882,6 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, } } -static s8 mt76_connac_get_sar_power(struct mt76_phy *phy, - struct ieee80211_channel *chan, - s8 target_power) -{ - const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa; - struct mt76_freq_range_power *frp = phy->frp; - int freq, i; - - if (!capa || !frp) - return target_power; - - freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band); - for (i = 0 ; i < capa->num_freq_ranges; i++) { - if (frp[i].range && - freq >= frp[i].range->start_freq && - freq < frp[i].range->end_freq) { - target_power = min_t(s8, frp[i].power, target_power); - break; - } - } - - return target_power; -} - static s8 mt76_connac_get_ch_power(struct mt76_phy *phy, struct ieee80211_channel *chan, s8 target_power) @@ -2008,12 +1981,12 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, } batch_size = DIV_ROUND_UP(n_chan, batch_len); - if (!phy->cap.has_5ghz) - last_ch = chan_list_2ghz[n_chan - 1]; - else if (phy->cap.has_6ghz) - last_ch = chan_list_6ghz[n_chan - 1]; + if (phy->cap.has_6ghz) + last_ch = chan_list_6ghz[ARRAY_SIZE(chan_list_6ghz) - 1]; + else if (phy->cap.has_5ghz) + last_ch = chan_list_5ghz[ARRAY_SIZE(chan_list_5ghz) - 1]; else - last_ch = chan_list_5ghz[n_chan - 1]; + last_ch = chan_list_2ghz[ARRAY_SIZE(chan_list_2ghz) - 1]; for (i = 0; i < batch_size; i++) { struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {}; @@ -2053,8 +2026,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, reg_power = mt76_connac_get_ch_power(phy, &chan, tx_power); - sar_power = mt76_connac_get_sar_power(phy, &chan, - reg_power); + sar_power = mt76_get_sar_power(phy, &chan, reg_power); mt76_get_rate_power_limits(phy, &chan, 
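/*
 * Editor's note: the last_ch hunk above stops indexing the per-band
 * channel tables with a count that may describe a different table, and
 * instead takes the last element of the table actually chosen, via
 * ARRAY_SIZE().  Standalone model of the pattern:
 */
#include <stddef.h>

#define ARRAY_SIZE_MODEL(a)	(sizeof(a) / sizeof((a)[0]))

static const unsigned char chan_2ghz_model[] = { 1, 2, 3, 4, 5, 6, 7 };

static unsigned char last_ch_model(void)
{
	/* the index is derived from the same array being dereferenced */
	return chan_2ghz_model[ARRAY_SIZE_MODEL(chan_2ghz_model) - 1];
}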
&limits, sar_power); @@ -2070,7 +2042,8 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, memcpy(skb->data, &tx_power_tlv, sizeof(tx_power_tlv)); err = mt76_mcu_skb_send_msg(dev, skb, - MCU_CMD_SET_RATE_TX_POWER, false); + MCU_CE_CMD(SET_RATE_TX_POWER), + false); if (err < 0) return err; } @@ -2143,7 +2116,7 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); } - return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true); + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter); @@ -2162,8 +2135,8 @@ int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw, .bss_idx = mvif->idx, }; - return mt76_mcu_send_msg(phy->dev, MCU_CMD_SET_P2P_OPPPS, &req, - sizeof(req), false); + return mt76_mcu_send_msg(phy->dev, MCU_CE_CMD(SET_P2P_OPPPS), + &req, sizeof(req), false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_p2p_oppps); @@ -2249,7 +2222,8 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw, memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN); memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN); - return mt76_mcu_skb_send_msg(phy->dev, skb, MCU_UNI_CMD_OFFLOAD, true); + return mt76_mcu_skb_send_msg(phy->dev, skb, + MCU_UNI_CMD(OFFLOAD), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_gtk_rekey); @@ -2275,8 +2249,8 @@ mt76_connac_mcu_set_arp_filter(struct mt76_dev *dev, struct ieee80211_vif *vif, }, }; - return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req), - true); + return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, + sizeof(req), true); } static int @@ -2301,8 +2275,8 @@ mt76_connac_mcu_set_gtk_rekey(struct mt76_dev *dev, struct ieee80211_vif *vif, }, }; - return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req), - true); + return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, + sizeof(req), true); } static int @@ -2331,8 +2305,8 @@ mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, }, }; - return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req), - true); + return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, + sizeof(req), true); } static int @@ -2366,7 +2340,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev, memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len); memcpy(ptlv->mask, pattern->mask, DIV_ROUND_UP(pattern->pattern_len, 8)); - return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_SUSPEND, true); + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SUSPEND), true); } static int @@ -2418,8 +2392,8 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, else if (mt76_is_sdio(dev)) req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO; - return mt76_mcu_send_msg(dev, MCU_UNI_CMD_SUSPEND, &req, sizeof(req), - true); + return mt76_mcu_send_msg(dev, MCU_UNI_CMD(SUSPEND), &req, + sizeof(req), true); } int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) @@ -2452,8 +2426,8 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) else if (mt76_is_sdio(dev)) req.hdr.hif_type = 0; - return mt76_mcu_send_msg(dev, MCU_UNI_CMD_HIF_CTRL, &req, sizeof(req), - true); + return mt76_mcu_send_msg(dev, MCU_UNI_CMD(HIF_CTRL), &req, + sizeof(req), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend); @@ -2461,7 +2435,7 @@ void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt76_phy *phy = priv; - bool suspend = test_bit(MT76_STATE_SUSPEND, &phy->state); + bool 
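/*
 * Editor's note: the WoW-pattern TLV above carries one mask *bit* per
 * pattern byte, so the mask copy rounds the bit count up to whole bytes
 * with DIV_ROUND_UP(pattern_len, 8).  The arithmetic in isolation:
 */
#define DIV_ROUND_UP_MODEL(n, d)	(((n) + (d) - 1) / (d))

static unsigned int wow_mask_bytes(unsigned int pattern_len)
{
	/* e.g. a 13-byte pattern needs 13 mask bits -> 2 mask bytes */
	return DIV_ROUND_UP_MODEL(pattern_len, 8);
}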
suspend = !test_bit(MT76_STATE_RUNNING, &phy->state); struct ieee80211_hw *hw = phy->hw; struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config; int i; @@ -2488,8 +2462,8 @@ u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset) .addr = cpu_to_le32(offset), }; - return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, &req, sizeof(req), - true); + return mt76_mcu_send_msg(dev, MCU_CE_QUERY(REG_READ), &req, + sizeof(req), true); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_rr); @@ -2503,7 +2477,8 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val) .val = cpu_to_le32(val), }; - mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false); + mt76_mcu_send_msg(dev, MCU_CE_CMD(REG_WRITE), &req, + sizeof(req), false); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 4e2c9dafd776..5baf8370b7bd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -11,6 +11,76 @@ struct tlv { __le16 len; } __packed; +struct bss_info_omac { + __le16 tag; + __le16 len; + u8 hw_bss_idx; + u8 omac_idx; + u8 band_idx; + u8 rsv0; + __le32 conn_type; + u32 rsv1; +} __packed; + +struct bss_info_basic { + __le16 tag; + __le16 len; + __le32 network_type; + u8 active; + u8 rsv0; + __le16 bcn_interval; + u8 bssid[ETH_ALEN]; + u8 wmm_idx; + u8 dtim_period; + u8 bmc_wcid_lo; + u8 cipher; + u8 phy_mode; + u8 max_bssid; /* max BSSID. range: 1 ~ 8, 0: MBSSID disabled */ + u8 non_tx_bssid;/* non-transmitted BSSID, 0: transmitted BSSID */ + u8 bmc_wcid_hi; /* high Byte and version */ + u8 rsv[2]; +} __packed; + +struct bss_info_rf_ch { + __le16 tag; + __le16 len; + u8 pri_ch; + u8 center_ch0; + u8 center_ch1; + u8 bw; + u8 he_ru26_block; /* 1: don't send HETB in RU26, 0: allow */ + u8 he_all_disable; /* 1: disallow all HETB, 0: allow */ + u8 rsv[2]; +} __packed; + +struct bss_info_ext_bss { + __le16 tag; + __le16 len; + __le32 mbss_tsf_offset; /* in unit of us */ + u8 rsv[8]; +} __packed; + +enum { + BSS_INFO_OMAC, + BSS_INFO_BASIC, + BSS_INFO_RF_CH, /* optional, for BT/LTE coex */ + BSS_INFO_PM, /* sta only */ + BSS_INFO_UAPSD, /* sta only */ + BSS_INFO_ROAM_DETECT, /* obsoleted */ + BSS_INFO_LQ_RM, /* obsoleted */ + BSS_INFO_EXT_BSS, + BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */ + BSS_INFO_SYNC_MODE, /* obsoleted */ + BSS_INFO_RA, + BSS_INFO_HW_AMSDU, + BSS_INFO_BSS_COLOR, + BSS_INFO_HE_BASIC, + BSS_INFO_PROTECT_INFO, + BSS_INFO_OFFLOAD, + BSS_INFO_11V_MBSSID, + BSS_INFO_MAX_NUM +}; + /* sta_rec */ struct sta_ntlv_hdr { @@ -54,7 +124,7 @@ struct sta_rec_vht { __le32 vht_cap; __le16 vht_rx_mcs_map; __le16 vht_tx_mcs_map; - /* mt7921 */ + /* mt7915 - mt7921 */ u8 rts_bw_sig; u8 rsv[3]; } __packed; @@ -152,6 +222,191 @@ struct sta_rec_he_6g_capa { u8 rsv[2]; } __packed; +struct sec_key { + u8 cipher_id; + u8 cipher_len; + u8 key_id; + u8 key_len; + u8 key[32]; +} __packed; + +struct sta_rec_sec { + __le16 tag; + __le16 len; + u8 add; + u8 n_cipher; + u8 rsv[2]; + + struct sec_key key[2]; +} __packed; + +struct sta_rec_bf { + __le16 tag; + __le16 len; + + __le16 pfmu; /* 0xffff: no access right for PFMU */ + bool su_mu; /* 0: SU, 1: MU */ + u8 bf_cap; /* 0: iBF, 1: eBF */ + u8 sounding_phy; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT */ + u8 ndpa_rate; + u8 ndp_rate; + u8 rept_poll_rate; + u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... 
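/*
 * Editor's note: the structures being added to mt76_connac_mcu.h above
 * are tag/length records packed back to back inside one MCU message.  A
 * hedged sketch of how such a buffer can be walked; field widths follow
 * struct tlv, and little-endian byte order is assumed for brevity:
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct tlv_model {
	uint16_t tag;	/* __le16 on the wire, e.g. BSS_INFO_BASIC */
	uint16_t len;	/* record length in bytes, header included */
};

static void walk_tlvs(const uint8_t *buf, size_t buflen)
{
	size_t off = 0;

	while (off + sizeof(struct tlv_model) <= buflen) {
		struct tlv_model t;

		memcpy(&t, buf + off, sizeof(t));	/* unaligned-safe */
		if (t.len < sizeof(t) || t.len > buflen - off)
			break;			/* malformed record, stop */
		/* dispatch on t.tag here */
		off += t.len;
	}
}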
*/ + u8 ncol; + u8 nrow; + u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */ + + u8 mem_total; + u8 mem_20m; + struct { + u8 row; + u8 col: 6, row_msb: 2; + } mem[4]; + + __le16 smart_ant; + u8 se_idx; + u8 auto_sounding; /* b7: low traffic indicator + * b6: Stop sounding for this entry + * b5 ~ b0: postpone sounding + */ + u8 ibf_timeout; + u8 ibf_dbw; + u8 ibf_ncol; + u8 ibf_nrow; + u8 nrow_bw160; + u8 ncol_bw160; + u8 ru_start_idx; + u8 ru_end_idx; + + bool trigger_su; + bool trigger_mu; + bool ng16_su; + bool ng16_mu; + bool codebook42_su; + bool codebook75_mu; + + u8 he_ltf; + u8 rsv[3]; +} __packed; + +struct sta_rec_bfee { + __le16 tag; + __le16 len; + bool fb_identity_matrix; /* 1: feedback identity matrix */ + bool ignore_feedback; /* 1: ignore */ + u8 rsv[2]; +} __packed; + +struct sta_rec_muru { + __le16 tag; + __le16 len; + + struct { + bool ofdma_dl_en; + bool ofdma_ul_en; + bool mimo_dl_en; + bool mimo_ul_en; + u8 rsv[4]; + } cfg; + + struct { + u8 punc_pream_rx; + bool he_20m_in_40m_2g; + bool he_20m_in_160m; + bool he_80m_in_160m; + bool lt16_sigb; + bool rx_su_comp_sigb; + bool rx_su_non_comp_sigb; + u8 rsv; + } ofdma_dl; + + struct { + u8 t_frame_dur; + u8 mu_cascading; + u8 uo_ra; + u8 he_2x996_tone; + u8 rx_t_frame_11ac; + u8 rsv[3]; + } ofdma_ul; + + struct { + bool vht_mu_bfee; + bool partial_bw_dl_mimo; + u8 rsv[2]; + } mimo_dl; + + struct { + bool full_ul_mimo; + bool partial_ul_mimo; + u8 rsv[2]; + } mimo_ul; +} __packed; + +struct sta_phy { + u8 type; + u8 flag; + u8 stbc; + u8 sgi; + u8 bw; + u8 ldpc; + u8 mcs; + u8 nss; + u8 he_ltf; +}; + +struct sta_rec_ra { + __le16 tag; + __le16 len; + + u8 valid; + u8 auto_rate; + u8 phy_mode; + u8 channel; + u8 bw; + u8 disable_cck; + u8 ht_mcs32; + u8 ht_gf; + u8 ht_mcs[4]; + u8 mmps_mode; + u8 gband_256; + u8 af; + u8 auth_wapi_mode; + u8 rate_len; + + u8 supp_mode; + u8 supp_cck_rate; + u8 supp_ofdm_rate; + __le32 supp_ht_mcs; + __le16 supp_vht_mcs[4]; + + u8 op_mode; + u8 op_vht_chan_width; + u8 op_vht_rx_nss; + u8 op_vht_rx_nss_type; + + __le32 sta_cap; + + struct sta_phy phy; +} __packed; + +struct sta_rec_ra_fixed { + __le16 tag; + __le16 len; + + __le32 field; + u8 op_mode; + u8 op_vht_chan_width; + u8 op_vht_rx_nss; + u8 op_vht_rx_nss_type; + + struct sta_phy phy; + + u8 spe_en; + u8 short_preamble; + u8 is_5g; + u8 mmps_mode; +} __packed; + /* wtbl_rec */ struct wtbl_req_hdr { @@ -234,6 +489,7 @@ struct wtbl_ba { __le16 sn; u8 ba_en; u8 ba_winsize_idx; + /* originator & recipient */ __le16 ba_winsize; /* recipient only */ u8 peer_addr[ETH_ALEN]; @@ -304,12 +560,17 @@ struct wtbl_raw { #define MT76_CONNAC_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct sta_rec_basic) + \ + sizeof(struct sta_rec_bf) + \ sizeof(struct sta_rec_ht) + \ sizeof(struct sta_rec_he) + \ sizeof(struct sta_rec_ba) + \ sizeof(struct sta_rec_vht) + \ sizeof(struct sta_rec_uapsd) + \ sizeof(struct sta_rec_amsdu) + \ + sizeof(struct sta_rec_muru) + \ + sizeof(struct sta_rec_bfee) + \ + sizeof(struct sta_rec_ra) + \ + sizeof(struct sta_rec_ra_fixed) + \ sizeof(struct sta_rec_he_6g_capa) + \ sizeof(struct tlv) + \ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE) @@ -423,7 +684,8 @@ enum { #define PHY_MODE_AC BIT(5) #define PHY_MODE_AX_24G BIT(6) #define PHY_MODE_AX_5G BIT(7) -#define PHY_MODE_AX_6G BIT(8) + +#define PHY_MODE_AX_6G BIT(0) /* phymode_ext */ #define MODE_CCK BIT(0) #define MODE_OFDM BIT(1) @@ -431,6 +693,21 @@ enum { #define MODE_VHT BIT(3) #define MODE_HE BIT(4) +#define STA_CAP_WMM BIT(0) +#define STA_CAP_SGI_20 BIT(4) +#define 
STA_CAP_SGI_40 BIT(5) +#define STA_CAP_TX_STBC BIT(6) +#define STA_CAP_RX_STBC BIT(7) +#define STA_CAP_VHT_SGI_80 BIT(16) +#define STA_CAP_VHT_SGI_160 BIT(17) +#define STA_CAP_VHT_TX_STBC BIT(18) +#define STA_CAP_VHT_RX_STBC BIT(19) +#define STA_CAP_VHT_LDPC BIT(23) +#define STA_CAP_LDPC BIT(24) +#define STA_CAP_HT BIT(26) +#define STA_CAP_VHT BIT(27) +#define STA_CAP_HE BIT(28) + enum { PHY_TYPE_HR_DSSS_INDEX = 0, PHY_TYPE_ERP_INDEX, @@ -489,6 +766,121 @@ enum { DEV_INFO_MAX_NUM }; +/* event table */ +enum { + MCU_EVENT_TARGET_ADDRESS_LEN = 0x01, + MCU_EVENT_FW_START = 0x01, + MCU_EVENT_GENERIC = 0x01, + MCU_EVENT_ACCESS_REG = 0x02, + MCU_EVENT_MT_PATCH_SEM = 0x04, + MCU_EVENT_REG_ACCESS = 0x05, + MCU_EVENT_LP_INFO = 0x07, + MCU_EVENT_SCAN_DONE = 0x0d, + MCU_EVENT_TX_DONE = 0x0f, + MCU_EVENT_ROC = 0x10, + MCU_EVENT_BSS_ABSENCE = 0x11, + MCU_EVENT_BSS_BEACON_LOSS = 0x13, + MCU_EVENT_CH_PRIVILEGE = 0x18, + MCU_EVENT_SCHED_SCAN_DONE = 0x23, + MCU_EVENT_DBG_MSG = 0x27, + MCU_EVENT_TXPWR = 0xd0, + MCU_EVENT_EXT = 0xed, + MCU_EVENT_RESTART_DL = 0xef, + MCU_EVENT_COREDUMP = 0xf0, +}; + +/* ext event table */ +enum { + MCU_EXT_EVENT_PS_SYNC = 0x5, + MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13, + MCU_EXT_EVENT_THERMAL_PROTECT = 0x22, + MCU_EXT_EVENT_ASSERT_DUMP = 0x23, + MCU_EXT_EVENT_RDD_REPORT = 0x3a, + MCU_EXT_EVENT_CSA_NOTIFY = 0x4f, + MCU_EXT_EVENT_BCC_NOTIFY = 0x75, + MCU_EXT_EVENT_MURU_CTRL = 0x9f, +}; + +enum { + MCU_Q_QUERY, + MCU_Q_SET, + MCU_Q_RESERVED, + MCU_Q_NA +}; + +enum { + MCU_S2D_H2N, + MCU_S2D_C2N, + MCU_S2D_H2C, + MCU_S2D_H2CN +}; + +enum { + PATCH_NOT_DL_SEM_FAIL, + PATCH_IS_DL, + PATCH_NOT_DL_SEM_SUCCESS, + PATCH_REL_SEM_SUCCESS +}; + +enum { + FW_STATE_INITIAL, + FW_STATE_FW_DOWNLOAD, + FW_STATE_NORMAL_OPERATION, + FW_STATE_NORMAL_TRX, + FW_STATE_RDY = 7 +}; + +enum { + CH_SWITCH_NORMAL = 0, + CH_SWITCH_SCAN = 3, + CH_SWITCH_MCC = 4, + CH_SWITCH_DFS = 5, + CH_SWITCH_BACKGROUND_SCAN_START = 6, + CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7, + CH_SWITCH_BACKGROUND_SCAN_STOP = 8, + CH_SWITCH_SCAN_BYPASS_DPD = 9 +}; + +enum { + THERMAL_SENSOR_TEMP_QUERY, + THERMAL_SENSOR_MANUAL_CTRL, + THERMAL_SENSOR_INFO_QUERY, + THERMAL_SENSOR_TASK_CTRL, +}; + +enum mcu_cipher_type { + MCU_CIPHER_NONE = 0, + MCU_CIPHER_WEP40, + MCU_CIPHER_WEP104, + MCU_CIPHER_WEP128, + MCU_CIPHER_TKIP, + MCU_CIPHER_AES_CCMP, + MCU_CIPHER_CCMP_256, + MCU_CIPHER_GCMP, + MCU_CIPHER_GCMP_256, + MCU_CIPHER_WAPI, + MCU_CIPHER_BIP_CMAC_128, +}; + +enum { + EE_MODE_EFUSE, + EE_MODE_BUFFER, +}; + +enum { + EE_FORMAT_BIN, + EE_FORMAT_WHOLE, + EE_FORMAT_MULTIPLE, +}; + +enum { + MCU_PHY_STATE_TX_RATE, + MCU_PHY_STATE_RX_RATE, + MCU_PHY_STATE_RSSI, + MCU_PHY_STATE_CONTENTION_RX_RATE, + MCU_PHY_STATE_OFDMLQ_CNINFO, +}; + #define MCU_CMD_ACK BIT(0) #define MCU_CMD_UNI BIT(1) #define MCU_CMD_QUERY BIT(2) @@ -496,29 +888,51 @@ enum { #define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | \ MCU_CMD_QUERY) -#define MCU_FW_PREFIX BIT(31) -#define MCU_UNI_PREFIX BIT(30) -#define MCU_CE_PREFIX BIT(29) -#define MCU_QUERY_PREFIX BIT(28) -#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \ - MCU_CE_PREFIX | MCU_QUERY_PREFIX) - -#define MCU_QUERY_MASK BIT(16) +#define __MCU_CMD_FIELD_ID GENMASK(7, 0) +#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8) +#define __MCU_CMD_FIELD_QUERY BIT(16) +#define __MCU_CMD_FIELD_UNI BIT(17) +#define __MCU_CMD_FIELD_CE BIT(18) +#define __MCU_CMD_FIELD_WA BIT(19) + +#define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, \ + MCU_CMD_##_t) +#define MCU_EXT_CMD(_t) (MCU_CMD(EXT_CID) | \ + 
FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \ + MCU_EXT_CMD_##_t)) +#define MCU_EXT_QUERY(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_QUERY) +#define MCU_UNI_CMD(_t) (__MCU_CMD_FIELD_UNI | \ + FIELD_PREP(__MCU_CMD_FIELD_ID, \ + MCU_UNI_CMD_##_t)) +#define MCU_CE_CMD(_t) (__MCU_CMD_FIELD_CE | \ + FIELD_PREP(__MCU_CMD_FIELD_ID, \ + MCU_CE_CMD_##_t)) +#define MCU_CE_QUERY(_t) (MCU_CE_CMD(_t) | __MCU_CMD_FIELD_QUERY) + +#define MCU_WA_CMD(_t) (MCU_CMD(_t) | __MCU_CMD_FIELD_WA) +#define MCU_WA_EXT_CMD(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_WA) +#define MCU_WA_PARAM_CMD(_t) (MCU_WA_CMD(WA_PARAM) | \ + FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \ + MCU_WA_PARAM_CMD_##_t)) enum { MCU_EXT_CMD_EFUSE_ACCESS = 0x01, MCU_EXT_CMD_RF_REG_ACCESS = 0x02, + MCU_EXT_CMD_RF_TEST = 0x04, MCU_EXT_CMD_PM_STATE_CTRL = 0x07, MCU_EXT_CMD_CHANNEL_SWITCH = 0x08, MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11, MCU_EXT_CMD_FW_LOG_2_HOST = 0x13, + MCU_EXT_CMD_TXBF_ACTION = 0x1e, MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21, + MCU_EXT_CMD_THERMAL_PROT = 0x23, MCU_EXT_CMD_STA_REC_UPDATE = 0x25, MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26, MCU_EXT_CMD_EDCA_UPDATE = 0x27, MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A, - MCU_EXT_CMD_GET_TEMP = 0x2c, + MCU_EXT_CMD_THERMAL_CTRL = 0x2c, MCU_EXT_CMD_WTBL_UPDATE = 0x32, + MCU_EXT_CMD_SET_DRR_CTRL = 0x36, MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, MCU_EXT_CMD_ATE_CTRL = 0x3d, MCU_EXT_CMD_PROTECT_CTRL = 0x3e, @@ -527,59 +941,74 @@ enum { MCU_EXT_CMD_RX_HDR_TRANS = 0x47, MCU_EXT_CMD_MUAR_UPDATE = 0x48, MCU_EXT_CMD_BCN_OFFLOAD = 0x49, + MCU_EXT_CMD_RX_AIRTIME_CTRL = 0x4a, MCU_EXT_CMD_SET_RX_PATH = 0x4e, + MCU_EXT_CMD_EFUSE_FREE_BLOCK = 0x4f, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58, MCU_EXT_CMD_RXDCOC_CAL = 0x59, + MCU_EXT_CMD_GET_MIB_INFO = 0x5a, MCU_EXT_CMD_TXDPD_CAL = 0x60, MCU_EXT_CMD_CAL_CACHE = 0x67, - MCU_EXT_CMD_SET_RDD_TH = 0x7c, + MCU_EXT_CMD_SET_RADAR_TH = 0x7c, MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d, + MCU_EXT_CMD_MWDS_SUPPORT = 0x80, + MCU_EXT_CMD_SET_SER_TRIGGER = 0x81, + MCU_EXT_CMD_SCS_CTRL = 0x82, + MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94, + MCU_EXT_CMD_FW_DBG_CTRL = 0x95, + MCU_EXT_CMD_SET_RDD_TH = 0x9d, + MCU_EXT_CMD_MURU_CTRL = 0x9f, + MCU_EXT_CMD_SET_SPR = 0xa8, + MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab, + MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac, + MCU_EXT_CMD_PHY_STAT_INFO = 0xad, }; enum { - MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01, - MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02, - MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03, - MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05, - MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06, - MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07, + MCU_UNI_CMD_DEV_INFO_UPDATE = 0x01, + MCU_UNI_CMD_BSS_INFO_UPDATE = 0x02, + MCU_UNI_CMD_STA_REC_UPDATE = 0x03, + MCU_UNI_CMD_SUSPEND = 0x05, + MCU_UNI_CMD_OFFLOAD = 0x06, + MCU_UNI_CMD_HIF_CTRL = 0x07, }; enum { - MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01, - MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02, + MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01, + MCU_CMD_FW_START_REQ = 0x02, MCU_CMD_INIT_ACCESS_REG = 0x3, - MCU_CMD_NIC_POWER_CTRL = MCU_FW_PREFIX | 0x4, - MCU_CMD_PATCH_START_REQ = MCU_FW_PREFIX | 0x05, - MCU_CMD_PATCH_FINISH_REQ = MCU_FW_PREFIX | 0x07, - MCU_CMD_PATCH_SEM_CONTROL = MCU_FW_PREFIX | 0x10, + MCU_CMD_NIC_POWER_CTRL = 0x4, + MCU_CMD_PATCH_START_REQ = 0x05, + MCU_CMD_PATCH_FINISH_REQ = 0x07, + MCU_CMD_PATCH_SEM_CONTROL = 0x10, + MCU_CMD_WA_PARAM = 0xc4, MCU_CMD_EXT_CID = 0xed, - MCU_CMD_FW_SCATTER = MCU_FW_PREFIX | 0xee, - MCU_CMD_RESTART_DL_REQ = MCU_FW_PREFIX | 0xef, + MCU_CMD_FW_SCATTER = 0xee, + MCU_CMD_RESTART_DL_REQ = 0xef, }; /* 
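/*
 * Editor's note: the heart of this series is visible above -- MCU
 * commands stop carrying routing hints in high "prefix" bits and become a
 * packed u32: command ID in bits 7:0, extended ID in bits 15:8, and
 * QUERY/UNI/CE/WA as individual flag bits.  Standalone model of the
 * round trip, with local stand-ins for GENMASK()/FIELD_PREP():
 */
#include <stdint.h>

#define F_ID_SHIFT	0
#define F_ID_MASK	(0xffu << F_ID_SHIFT)	/* GENMASK(7, 0) */
#define F_EXT_SHIFT	8
#define F_EXT_MASK	(0xffu << F_EXT_SHIFT)	/* GENMASK(15, 8) */
#define F_QUERY		(1u << 16)

static uint32_t ext_cmd_model(uint8_t ext_id)
{
	/* MCU_EXT_CMD(): ID field holds EXT_CID (0xed), EXT_ID the cmd */
	return (0xedu << F_ID_SHIFT) | ((uint32_t)ext_id << F_EXT_SHIFT);
}

static uint8_t cmd_id_model(uint32_t cmd)
{
	return (uint8_t)((cmd & F_ID_MASK) >> F_ID_SHIFT);
}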
offload mcu commands */ enum { - MCU_CMD_TEST_CTRL = MCU_CE_PREFIX | 0x01, - MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03, - MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05, - MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f, - MCU_CMD_SET_BSS_CONNECTED = MCU_CE_PREFIX | 0x16, - MCU_CMD_SET_BSS_ABORT = MCU_CE_PREFIX | 0x17, - MCU_CMD_CANCEL_HW_SCAN = MCU_CE_PREFIX | 0x1b, - MCU_CMD_SET_ROC = MCU_CE_PREFIX | 0x1d, - MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33, - MCU_CMD_SET_RATE_TX_POWER = MCU_CE_PREFIX | 0x5d, - MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61, - MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62, - MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a, - MCU_CMD_SET_MU_EDCA_PARMS = MCU_CE_PREFIX | 0xb0, - MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0, - MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0, - MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca, - MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5, - MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd, - MCU_CMD_GET_TXPWR = MCU_CE_PREFIX | 0xd0, + MCU_CE_CMD_TEST_CTRL = 0x01, + MCU_CE_CMD_START_HW_SCAN = 0x03, + MCU_CE_CMD_SET_PS_PROFILE = 0x05, + MCU_CE_CMD_SET_CHAN_DOMAIN = 0x0f, + MCU_CE_CMD_SET_BSS_CONNECTED = 0x16, + MCU_CE_CMD_SET_BSS_ABORT = 0x17, + MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b, + MCU_CE_CMD_SET_ROC = 0x1d, + MCU_CE_CMD_SET_P2P_OPPPS = 0x33, + MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d, + MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61, + MCU_CE_CMD_SCHED_SCAN_REQ = 0x62, + MCU_CE_CMD_GET_NIC_CAPAB = 0x8a, + MCU_CE_CMD_SET_MU_EDCA_PARMS = 0xb0, + MCU_CE_CMD_REG_WRITE = 0xc0, + MCU_CE_CMD_REG_READ = 0xc0, + MCU_CE_CMD_CHIP_CONFIG = 0xca, + MCU_CE_CMD_FWLOG_2_HOST = 0xc5, + MCU_CE_CMD_GET_WTBL = 0xcd, + MCU_CE_CMD_GET_TXPWR = 0xd0, }; enum { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index 0bac39bf3b66..66d47c70111a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -237,7 +237,10 @@ int mt76x0_register_device(struct mt76x02_dev *dev) { int ret; - mt76x02_init_device(dev); + ret = mt76x02_init_device(dev); + if (ret) + return ret; + mt76x02_config_mac_addr_list(dev); ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index 700ae9c12f1d..07380cce8755 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -31,6 +31,32 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) mt76_txq_schedule_all(&dev->mphy); } +int mt76x0_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + int err = -EINVAL, power = hw->conf.power_level * 2; + struct mt76x02_dev *dev = hw->priv; + struct mt76_phy *mphy = &dev->mphy; + + mutex_lock(&dev->mt76.mutex); + if (!cfg80211_chandef_valid(&mphy->chandef)) + goto out; + + err = mt76_init_sar_power(hw, sar); + if (err) + goto out; + + dev->txpower_conf = mt76_get_sar_power(mphy, mphy->chandef.chan, + power); + if (test_bit(MT76_STATE_RUNNING, &mphy->state)) + mt76x0_phy_set_txpower(dev); +out: + mutex_unlock(&dev->mt76.mutex); + + return err; +} +EXPORT_SYMBOL_GPL(mt76x0_set_sar_specs); + int mt76x0_config(struct ieee80211_hw *hw, u32 changed) { struct mt76x02_dev *dev = hw->priv; @@ -44,9 +70,13 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - dev->txpower_conf = hw->conf.power_level * 2; + struct mt76_phy 
*mphy = &dev->mphy; - if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) + dev->txpower_conf = hw->conf.power_level * 2; + dev->txpower_conf = mt76_get_sar_power(mphy, + mphy->chandef.chan, + dev->txpower_conf); + if (test_bit(MT76_STATE_RUNNING, &mphy->state)) mt76x0_phy_set_txpower(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index 6953f253a28a..99dcb8feb9f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -49,6 +49,8 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset); void mt76x0_mac_stop(struct mt76x02_dev *dev); int mt76x0_config(struct ieee80211_hw *hw, u32 changed); +int mt76x0_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar); /* PHY */ void mt76x0_phy_init(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index f19228fc5a70..9277ff38b7a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -85,6 +85,7 @@ static const struct ieee80211_ops mt76x0e_ops = { .set_rts_threshold = mt76x02_set_rts_threshold, .get_antenna = mt76_get_antenna, .reconfig_complete = mt76x02_reconfig_complete, + .set_sar_specs = mt76x0_set_sar_specs, }; static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index f2b2fa733845..436daf6d6d86 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -141,6 +141,7 @@ static const struct ieee80211_ops mt76x0u_ops = { .set_tim = mt76_set_tim, .release_buffered_frames = mt76_release_buffered_frames, .get_antenna = mt76_get_antenna, + .set_sar_specs = mt76x0_set_sar_specs, }; static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 4d58c2c1c0ac..44d1a92d9a90 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -133,7 +133,7 @@ struct mt76x02_dev { extern struct ieee80211_rate mt76x02_rates[12]; -void mt76x02_init_device(struct mt76x02_dev *dev); +int mt76x02_init_device(struct mt76x02_dev *dev); void mt76x02_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index efd70ddc2fd1..2c6c03809b20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU; enum mt76_qsel qsel; u32 flags; + int err; mt76_insert_hdr_pad(tx_info->skb); @@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, ewma_pktlen_add(&msta->pktlen, tx_info->skb->len); } - return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags); + err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags); + if (err && wcid) + /* Release pktid in case of error. 
*/ + idr_remove(&wcid->pktid, pid); + + return err; } EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index 1f17d86ff755..dd30f537676d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -138,7 +138,7 @@ mt76x02_led_set_brightness(struct led_classdev *led_cdev, mt76x02_led_set_config(mdev, 0xff, 0); } -void mt76x02_init_device(struct mt76x02_dev *dev) +int mt76x02_init_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct wiphy *wiphy = hw->wiphy; @@ -197,6 +197,8 @@ void mt76x02_init_device(struct mt76x02_dev *dev) dev->mphy.chainmask = 0x101; dev->mphy.antenna_mask = 1; } + + return 0; } EXPORT_SYMBOL_GPL(mt76x02_init_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index a92a479aebaa..7b01a06d7f8d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -8,6 +8,35 @@ #include "eeprom.h" #include "../mt76x02_phy.h" +int mt76x2_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + int err = -EINVAL, power = hw->conf.power_level * 2; + struct mt76x02_dev *dev = hw->priv; + struct mt76_phy *mphy = &dev->mphy; + + mutex_lock(&dev->mt76.mutex); + if (!cfg80211_chandef_valid(&mphy->chandef)) + goto out; + + err = mt76_init_sar_power(hw, sar); + if (err) + goto out; + + dev->txpower_conf = mt76_get_sar_power(mphy, mphy->chandef.chan, + power); + /* convert to per-chain power for 2x2 devices */ + dev->txpower_conf -= 6; + + if (test_bit(MT76_STATE_RUNNING, &mphy->state)) + mt76x2_phy_set_txpower(dev); +out: + mutex_unlock(&dev->mt76.mutex); + + return err; +} +EXPORT_SYMBOL_GPL(mt76x2_set_sar_specs); + static void mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index d01f47c83eb1..be1217329a77 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -41,6 +41,8 @@ extern const struct ieee80211_ops mt76x2_ops; int mt76x2_register_device(struct mt76x02_dev *dev); int mt76x2_resume_device(struct mt76x02_dev *dev); +int mt76x2_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar); void mt76x2_phy_power_on(struct mt76x02_dev *dev); void mt76x2_stop_hardware(struct mt76x02_dev *dev); int mt76x2_eeprom_init(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index c6fa8cf92529..e38e8e5685c2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -292,8 +292,9 @@ int mt76x2_register_device(struct mt76x02_dev *dev) int ret; INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); - - mt76x02_init_device(dev); + ret = mt76x02_init_device(dev); + if (ret) + return ret; ret = mt76x2_init_hardware(dev); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 933125b07ea3..b38bb7a2362b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -78,8 +78,12 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed) } 
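/*
 * Editor's note: the mt76x02u hunk above plugs a leak -- when the USB DMA
 * header step fails after a packet ID was already allocated for TX-status
 * tracking, the ID is now returned to the IDR.  Generic shape of the
 * acquire/attempt/undo pattern (all helper names are hypothetical stubs):
 */
static int next_pid_model;
static int alloc_pktid_model(void) { return next_pid_model++; }	/* stub */
static int build_dma_info_model(void) { return -1; }	/* stub: fails */
static void free_pktid_model(int pid) { (void)pid; }	/* stub */

static int tx_prepare_model(void)
{
	int pid = alloc_pktid_model();
	int err = build_dma_info_model();

	if (err)
		free_pktid_model(pid);	/* undo, or the ID leaks */
	return err;
}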
if (changed & IEEE80211_CONF_CHANGE_POWER) { - dev->txpower_conf = hw->conf.power_level * 2; + struct mt76_phy *mphy = &dev->mphy; + dev->txpower_conf = hw->conf.power_level * 2; + dev->txpower_conf = mt76_get_sar_power(mphy, + mphy->chandef.chan, + dev->txpower_conf); /* convert to per-chain power for 2x2 devices */ dev->txpower_conf -= 6; @@ -155,5 +159,6 @@ const struct ieee80211_ops mt76x2_ops = { .get_antenna = mt76_get_antenna, .set_rts_threshold = mt76x02_set_rts_threshold, .reconfig_complete = mt76x02_reconfig_complete, + .set_sar_specs = mt76x2_set_sar_specs, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 85dcdc22fbeb..33a14365ec9b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -194,7 +194,9 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) int err; INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate); - mt76x02_init_device(dev); + err = mt76x02_init_device(dev); + if (err) + return err; err = mt76x2u_init_eeprom(dev); if (err < 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index b66836928d9d..ac07ed1f63a3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -78,12 +78,16 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - dev->txpower_conf = hw->conf.power_level * 2; + struct mt76_phy *mphy = &dev->mphy; + dev->txpower_conf = hw->conf.power_level * 2; + dev->txpower_conf = mt76_get_sar_power(mphy, + mphy->chandef.chan, + dev->txpower_conf); /* convert to per-chain power for 2x2 devices */ dev->txpower_conf -= 6; - if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) + if (test_bit(MT76_STATE_RUNNING, &mphy->state)) mt76x2_phy_set_txpower(dev); } @@ -121,4 +125,5 @@ const struct ieee80211_ops mt76x2u_ops = { .set_tim = mt76_set_tim, .release_buffered_frames = mt76_release_buffered_frames, .get_antenna = mt76_get_antenna, + .set_sar_specs = mt76x2_set_sar_specs, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index a15aa256d0cf..e96d1c31dd36 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -82,6 +82,225 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL, mt7915_radar_trigger, "%lld\n"); static int +mt7915_muru_debug_set(void *data, u64 val) +{ + struct mt7915_dev *dev = data; + + dev->muru_debug = val; + mt7915_mcu_muru_debug_set(dev, data); + + return 0; +} + +static int +mt7915_muru_debug_get(void *data, u64 *val) +{ + struct mt7915_dev *dev = data; + + *val = dev->muru_debug; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_muru_debug, mt7915_muru_debug_get, + mt7915_muru_debug_set, "%lld\n"); + +static int mt7915_muru_stats_show(struct seq_file *file, void *data) +{ + struct mt7915_phy *phy = file->private; + struct mt7915_dev *dev = phy->dev; + struct mt7915_mcu_muru_stats mu_stats = {}; + static const char * const dl_non_he_type[] = { + "CCK", "OFDM", "HT MIX", "HT GF", + "VHT SU", "VHT 2MU", "VHT 3MU", "VHT 4MU" + }; + static const char * const dl_he_type[] = { + "HE SU", "HE EXT", "HE 2MU", "HE 3MU", "HE 4MU", + "HE 2RU", "HE 3RU", "HE 4RU", "HE 5-8RU", "HE 9-16RU", + "HE >16RU" + }; + static const char * const ul_he_type[] = { + "HE 2MU", "HE 3MU", 
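/*
 * Editor's note: in the mt76x2 hunks above, tx power is tracked in 0.5 dB
 * units (hence hw->conf.power_level * 2), and the "-= 6" converts the
 * combined limit to per-chain power for a 2x2 device: 6 half-dB units are
 * 3 dB, i.e. each of the two chains transmits at half power so the summed
 * output still meets the limit.  In numbers:
 */
static int per_chain_power_units(int power_level_dbm)
{
	int units = power_level_dbm * 2;	/* dBm -> 0.5 dB units */

	return units - 6;			/* -3 dB for two chains */
}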
"HE 4MU", "HE SU", "HE 2RU", + "HE 3RU", "HE 4RU", "HE 5-8RU", "HE 9-16RU", "HE >16RU" + }; + int ret, i; + u64 total_ppdu_cnt, sub_total_cnt; + + if (!dev->muru_debug) { + seq_puts(file, "Please enable muru_debug first.\n"); + return 0; + } + + mutex_lock(&dev->mt76.mutex); + + ret = mt7915_mcu_muru_debug_get(phy, &mu_stats); + if (ret) + goto exit; + + /* Non-HE Downlink*/ + seq_puts(file, "[Non-HE]\nDownlink\nData Type: "); + + for (i = 0; i < 5; i++) + seq_printf(file, "%8s | ", dl_non_he_type[i]); + +#define __dl_u32(s) le32_to_cpu(mu_stats.dl.s) + seq_puts(file, "\nTotal Count:"); + seq_printf(file, "%8u | %8u | %8u | %8u | %8u | ", + __dl_u32(cck_cnt), + __dl_u32(ofdm_cnt), + __dl_u32(htmix_cnt), + __dl_u32(htgf_cnt), + __dl_u32(vht_su_cnt)); + + seq_puts(file, "\nDownlink MU-MIMO\nData Type: "); + + for (i = 5; i < 8; i++) + seq_printf(file, "%8s | ", dl_non_he_type[i]); + + seq_puts(file, "\nTotal Count:"); + seq_printf(file, "%8u | %8u | %8u | ", + __dl_u32(vht_2mu_cnt), + __dl_u32(vht_3mu_cnt), + __dl_u32(vht_4mu_cnt)); + + sub_total_cnt = __dl_u32(vht_2mu_cnt) + + __dl_u32(vht_3mu_cnt) + + __dl_u32(vht_4mu_cnt); + + seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld", + sub_total_cnt); + + total_ppdu_cnt = sub_total_cnt + + __dl_u32(cck_cnt) + + __dl_u32(ofdm_cnt) + + __dl_u32(htmix_cnt) + + __dl_u32(htgf_cnt) + + __dl_u32(vht_su_cnt); + + seq_printf(file, "\nAll non-HE DL PPDU count: %lld", total_ppdu_cnt); + + /* HE Downlink */ + seq_puts(file, "\n\n[HE]\nDownlink\nData Type: "); + + for (i = 0; i < 2; i++) + seq_printf(file, "%8s | ", dl_he_type[i]); + + seq_puts(file, "\nTotal Count:"); + seq_printf(file, "%8u | %8u | ", + __dl_u32(he_su_cnt), + __dl_u32(he_ext_su_cnt)); + + seq_puts(file, "\nDownlink MU-MIMO\nData Type: "); + + for (i = 2; i < 5; i++) + seq_printf(file, "%8s | ", dl_he_type[i]); + + seq_puts(file, "\nTotal Count:"); + seq_printf(file, "%8u | %8u | %8u | ", + __dl_u32(he_2mu_cnt), + __dl_u32(he_3mu_cnt), + __dl_u32(he_4mu_cnt)); + + seq_puts(file, "\nDownlink OFDMA\nData Type: "); + + for (i = 5; i < 11; i++) + seq_printf(file, "%8s | ", dl_he_type[i]); + + seq_puts(file, "\nTotal Count:"); + seq_printf(file, "%8u | %8u | %8u | %8u | %9u | %8u | ", + __dl_u32(he_2ru_cnt), + __dl_u32(he_3ru_cnt), + __dl_u32(he_4ru_cnt), + __dl_u32(he_5to8ru_cnt), + __dl_u32(he_9to16ru_cnt), + __dl_u32(he_gtr16ru_cnt)); + + sub_total_cnt = __dl_u32(he_2mu_cnt) + + __dl_u32(he_3mu_cnt) + + __dl_u32(he_4mu_cnt); + total_ppdu_cnt = sub_total_cnt; + + seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld", + sub_total_cnt); + + sub_total_cnt = __dl_u32(he_2ru_cnt) + + __dl_u32(he_3ru_cnt) + + __dl_u32(he_4ru_cnt) + + __dl_u32(he_5to8ru_cnt) + + __dl_u32(he_9to16ru_cnt) + + __dl_u32(he_gtr16ru_cnt); + total_ppdu_cnt += sub_total_cnt; + + seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld", + sub_total_cnt); + + total_ppdu_cnt += __dl_u32(he_su_cnt) + + __dl_u32(he_ext_su_cnt); + + seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt); +#undef __dl_u32 + + /* HE Uplink */ + seq_puts(file, "\n\nUplink"); + seq_puts(file, "\nTrigger-based Uplink MU-MIMO\nData Type: "); + + for (i = 0; i < 3; i++) + seq_printf(file, "%8s | ", ul_he_type[i]); + +#define __ul_u32(s) le32_to_cpu(mu_stats.ul.s) + seq_puts(file, "\nTotal Count:"); + seq_printf(file, "%8u | %8u | %8u | ", + __ul_u32(hetrig_2mu_cnt), + __ul_u32(hetrig_3mu_cnt), + __ul_u32(hetrig_4mu_cnt)); + + seq_puts(file, "\nTrigger-based Uplink OFDMA\nData Type: "); + + for (i = 3; i < 10; i++) + 
seq_printf(file, "%8s | ", ul_he_type[i]); + + seq_puts(file, "\nTotal Count:"); + seq_printf(file, "%8u | %8u | %8u | %8u | %8u | %9u | %7u | ", + __ul_u32(hetrig_su_cnt), + __ul_u32(hetrig_2ru_cnt), + __ul_u32(hetrig_3ru_cnt), + __ul_u32(hetrig_4ru_cnt), + __ul_u32(hetrig_5to8ru_cnt), + __ul_u32(hetrig_9to16ru_cnt), + __ul_u32(hetrig_gtr16ru_cnt)); + + sub_total_cnt = __ul_u32(hetrig_2mu_cnt) + + __ul_u32(hetrig_3mu_cnt) + + __ul_u32(hetrig_4mu_cnt); + total_ppdu_cnt = sub_total_cnt; + + seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld", + sub_total_cnt); + + sub_total_cnt = __ul_u32(hetrig_2ru_cnt) + + __ul_u32(hetrig_3ru_cnt) + + __ul_u32(hetrig_4ru_cnt) + + __ul_u32(hetrig_5to8ru_cnt) + + __ul_u32(hetrig_9to16ru_cnt) + + __ul_u32(hetrig_gtr16ru_cnt); + total_ppdu_cnt += sub_total_cnt; + + seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld", + sub_total_cnt); + + total_ppdu_cnt += __ul_u32(hetrig_su_cnt); + + seq_printf(file, "\nAll HE UL TB PPDU count: %lld\n", total_ppdu_cnt); +#undef __ul_u32 + +exit: + mutex_unlock(&dev->mt76.mutex); + + return ret; +} +DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats); + +static int mt7915_fw_debug_wm_set(void *data, u64 val) { struct mt7915_dev *dev = data; @@ -355,8 +574,8 @@ mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta) qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL, GENMASK(11, 0)); seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n", - sta->addr, msta->wcid.idx, msta->vif->wmm_idx, - ac, qlen); + sta->addr, msta->wcid.idx, + msta->vif->mt76.wmm_idx, ac, qlen); } } @@ -528,7 +747,9 @@ int mt7915_init_debugfs(struct mt7915_phy *phy) dir = mt76_register_debugfs_fops(phy->mt76, NULL); if (!dir) return -ENOMEM; - + debugfs_create_file("muru_debug", 0600, dir, dev, &fops_muru_debug); + debugfs_create_file("muru_stats", 0400, dir, phy, + &mt7915_muru_stats_fops); debugfs_create_file("hw-queues", 0400, dir, phy, &mt7915_hw_queues_fops); debugfs_create_file("xmit-queues", 0400, dir, phy, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index ee3d64434821..edd74d0de157 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: ISC /* Copyright (C) 2020 MediaTek Inc. 
*/ +#include <linux/firmware.h> #include "mt7915.h" #include "eeprom.h" @@ -10,6 +11,9 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev) u8 *eeprom = mdev->eeprom.data; u32 val = eeprom[MT_EE_DO_PRE_CAL]; + if (!dev->flash_mode) + return 0; + if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP)) return 0; @@ -21,6 +25,49 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev) return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val); } +static int mt7915_check_eeprom(struct mt7915_dev *dev) +{ + u8 *eeprom = dev->mt76.eeprom.data; + u16 val = get_unaligned_le16(eeprom); + + switch (val) { + case 0x7915: + return 0; + default: + return -EINVAL; + } +} + +static int +mt7915_eeprom_load_default(struct mt7915_dev *dev) +{ + char *default_bin = MT7915_EEPROM_DEFAULT; + u8 *eeprom = dev->mt76.eeprom.data; + const struct firmware *fw = NULL; + int ret; + + if (dev->dbdc_support) + default_bin = MT7915_EEPROM_DEFAULT_DBDC; + + ret = request_firmware(&fw, default_bin, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data) { + dev_err(dev->mt76.dev, "Invalid default bin\n"); + ret = -EINVAL; + goto out; + } + + memcpy(eeprom, fw->data, MT7915_EEPROM_SIZE); + dev->flash_mode = true; + +out: + release_firmware(fw); + + return ret; +} + static int mt7915_eeprom_load(struct mt7915_dev *dev) { int ret; @@ -31,10 +78,16 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev) if (ret) { dev->flash_mode = true; - ret = mt7915_eeprom_load_precal(dev); } else { + u8 free_block_num; u32 block_num, i; + mt7915_mcu_get_eeprom_free_block(dev, &free_block_num); + /* efuse info not enough */ + if (free_block_num >= 29) + return -EINVAL; + + /* read eeprom data from efuse */ block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE, MT7915_EEPROM_BLOCK_SIZE); for (i = 0; i < block_num; i++) @@ -42,20 +95,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev) i * MT7915_EEPROM_BLOCK_SIZE); } - return ret; -} - -static int mt7915_check_eeprom(struct mt7915_dev *dev) -{ - u8 *eeprom = dev->mt76.eeprom.data; - u16 val = get_unaligned_le16(eeprom); - - switch (val) { - case 0x7915: - return 0; - default: - return -EINVAL; - } + return mt7915_check_eeprom(dev); } void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy) @@ -117,10 +157,17 @@ int mt7915_eeprom_init(struct mt7915_dev *dev) int ret; ret = mt7915_eeprom_load(dev); - if (ret < 0) - return ret; + if (ret < 0) { + if (ret != -EINVAL) + return ret; + + dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n"); + ret = mt7915_eeprom_load_default(dev); + if (ret) + return ret; + } - ret = mt7915_check_eeprom(dev); + ret = mt7915_eeprom_load_precal(dev); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 4fa8e7ba93e6..d054cdecd5f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -853,7 +853,8 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, mt7915_gen_ppe_thresh(he_cap->ppe_thres, nss); } else { he_cap_elem->phy_cap_info[9] |= - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US; + u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US, + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); } idx++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 5fcf35f2d9fb..48f115502282 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ 
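/*
 * Editor's note: the eeprom.c hunk above adds a fallback that loads a
 * default calibration image through the firmware loader when the efuse
 * content is unusable.  Kernel-context sketch of the request/validate/
 * copy/release sequence; the file name here is illustrative:
 */
#include <linux/firmware.h>
#include <linux/string.h>

static int load_default_eeprom_model(struct device *dev, u8 *dst, size_t len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, "mediatek/default_eeprom.bin", dev);
	if (ret)
		return ret;

	if (!fw->data || fw->size < len) {
		ret = -EINVAL;		/* reject short or empty images */
		goto out;
	}
	memcpy(dst, fw->data, len);
out:
	release_firmware(fw);
	return ret;
}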
b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -268,10 +268,9 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, } static void -mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, - struct mt76_rx_status *status, - __le32 *rxv) +mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv) { + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; static const struct ieee80211_radiotap_he_mu mu_known = { .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | @@ -281,6 +280,8 @@ mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, }; struct ieee80211_radiotap_he_mu *he_mu = NULL; + status->flag |= RX_FLAG_RADIOTAP_HE_MU; + he_mu = skb_push(skb, sizeof(mu_known)); memcpy(he_mu, &mu_known, sizeof(mu_known)); @@ -308,10 +309,9 @@ mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, } static void -mt7915_mac_decode_he_radiotap(struct sk_buff *skb, - struct mt76_rx_status *status, - __le32 *rxv, u32 phy) +mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode) { + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; static const struct ieee80211_radiotap_he known = { .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | HE_BITS(DATA1_DATA_DCM_KNOWN) | @@ -329,6 +329,8 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, struct ieee80211_radiotap_he *he = NULL; u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1; + status->flag |= RX_FLAG_RADIOTAP_HE; + he = skb_push(skb, sizeof(known)); memcpy(he, &known, sizeof(known)); @@ -343,7 +345,7 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) | HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]); - switch (phy) { + switch (mode) { case MT_PHY_TYPE_HE_SU: he->data1 |= HE_BITS(DATA1_FORMAT_SU) | HE_BITS(DATA1_UL_DL_KNOWN) | @@ -366,6 +368,7 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]); mt7915_mac_decode_he_radiotap_ru(status, he, rxv); + mt7915_mac_decode_he_mu_radiotap(skb, rxv); break; case MT_PHY_TYPE_HE_TB: he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | @@ -384,6 +387,81 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, } } +/* The HW does not translate the mac header to 802.3 for mesh point */ +static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid; + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct ieee80211_hdr hdr; + struct ethhdr eth_hdr; + __le32 *rxd = (__le32 *)skb->data; + __le32 qos_ctrl, ht_ctrl; + + if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) != + MT_RXD3_NORMAL_U2M) + return -EINVAL; + + if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4)) + return -EINVAL; + + if (!msta || !msta->vif) + return -EINVAL; + + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + + /* store the info from RXD and ethhdr to avoid being overridden */ + memcpy(ð_hdr, skb->data + hdr_gap, sizeof(eth_hdr)); + hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]); + hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]); + qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]); + ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]); + + hdr.duration_id = 0; + ether_addr_copy(hdr.addr1, vif->addr); + ether_addr_copy(hdr.addr2, sta->addr); + switch (le16_to_cpu(hdr.frame_control) & + (IEEE80211_FCTL_TODS 
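/*
 * Editor's note: the radiotap refactor above drops the explicit status
 * argument because the per-frame RX status already lives in the skb's
 * 48-byte control buffer (skb->cb), so helpers can recover it from the
 * skb alone.  Illustrative shape of that convention; in the real code the
 * skb allocator guarantees the alignment this cast relies on:
 */
struct rx_status_model {
	unsigned int flag;	/* e.g. RX_FLAG_RADIOTAP_HE */
};

struct skb_model {
	char cb[48];	/* scratch space owned by the current layer */
};

static struct rx_status_model *rx_status_of(struct skb_model *skb)
{
	return (struct rx_status_model *)skb->cb;
}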
| IEEE80211_FCTL_FROMDS)) { + case 0: + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); + break; + case IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr.h_source); + break; + case IEEE80211_FCTL_TODS: + ether_addr_copy(hdr.addr3, eth_hdr.h_dest); + break; + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr.h_dest); + ether_addr_copy(hdr.addr4, eth_hdr.h_source); + break; + default: + break; + } + + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); + if (eth_hdr.h_proto == htons(ETH_P_AARP) || + eth_hdr.h_proto == htons(ETH_P_IPX)) + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); + else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN)) + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); + else + skb_pull(skb, 2); + + if (ieee80211_has_order(hdr.frame_control)) + memcpy(skb_push(skb, 2), &ht_ctrl, 2); + if (ieee80211_is_data_qos(hdr.frame_control)) + memcpy(skb_push(skb, 2), &qos_ctrl, 2); + if (ieee80211_has_a4(hdr.frame_control)) + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); + else + memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); + + return 0; +} + static int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) { @@ -391,7 +469,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) struct mt76_phy *mphy = &dev->mt76.phy; struct mt7915_phy *phy = &dev->phy; struct ieee80211_supported_band *sband; - struct ieee80211_hdr *hdr; __le32 *rxd = (__le32 *)skb->data; __le32 *rxv = NULL; u32 mode = 0; @@ -404,6 +481,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) bool unicast, insert_ccmp_hdr = false; u8 remove_pad, amsdu_info; bool hdr_trans; + u16 hdr_gap; u16 seq_ctrl = 0; u8 qos_ctl = 0; __le16 fc = 0; @@ -426,9 +504,16 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) return -EINVAL; + hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; + if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) + return -EINVAL; + + /* ICV error or CCMP/BIP/WPI MIC error */ + if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) + status->flag |= RX_FLAG_ONLY_MONITOR; + unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); - hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); if (status->wcid) { @@ -604,15 +689,12 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) return -EINVAL; break; case MT_PHY_TYPE_HE_MU: - status->flag |= RX_FLAG_RADIOTAP_HE_MU; - fallthrough; case MT_PHY_TYPE_HE_SU: case MT_PHY_TYPE_HE_EXT_SU: case MT_PHY_TYPE_HE_TB: status->nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1; status->encoding = RX_ENC_HE; - status->flag |= RX_FLAG_RADIOTAP_HE; i &= GENMASK(3, 0); if (gi <= NL80211_RATE_INFO_HE_GI_3_2) @@ -654,27 +736,55 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) } } - skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); - amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); status->amsdu = !!amsdu_info; if (status->amsdu) { status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; - if (!hdr_trans) { - memmove(skb->data + 2, skb->data, - ieee80211_get_hdrlen_from_skb(skb)); - skb_pull(skb, 2); - } } - if (insert_ccmp_hdr && !hdr_trans) { - u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; + if (hdr_trans && ieee80211_has_morefrags(fc)) { + if 
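/*
 * Editor's note: the switch in mt7915_reverse_frag0_hdr_trans() above
 * follows the standard 802.11 ToDS/FromDS addressing table when it
 * rebuilds the header that the hardware had already translated to 802.3:
 *
 *	ToDS FromDS	addr1		addr2		addr3		addr4
 *	 0     0	RA = DA		TA = SA		BSSID		-
 *	 0     1	RA = DA		TA = BSSID	SA (h_source)	-
 *	 1     0	RA = BSSID	TA = SA		DA (h_dest)	-
 *	 1     1	RA		TA		DA (h_dest)	SA (h_source)
 *
 * which is why only the 4-address (WDS) case also fills addr4, and why an
 * LLC/SNAP prefix (rfc1042 or bridge-tunnel) is re-inserted in front of
 * EtherType payloads before the 802.11 header is pushed back on.
 */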
(mt7915_reverse_frag0_hdr_trans(skb, hdr_gap)) + return -EINVAL; + hdr_trans = false; + } else { + int pad_start = 0; + + skb_pull(skb, hdr_gap); + if (!hdr_trans && status->amsdu) { + pad_start = ieee80211_get_hdrlen_from_skb(skb); + } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { + /* + * When header translation failure is indicated, + * the hardware will insert an extra 2-byte field + * containing the data length after the protocol + * type field. + */ + pad_start = 12; + if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) + pad_start += 4; + + if (get_unaligned_be16(skb->data + pad_start) != + skb->len - pad_start - 2) + pad_start = 0; + } - mt76_insert_ccmp_hdr(skb, key_id); + if (pad_start) { + memmove(skb->data + 2, skb->data, pad_start); + skb_pull(skb, 2); + } } if (!hdr_trans) { + struct ieee80211_hdr *hdr; + + if (insert_ccmp_hdr) { + u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); + + mt76_insert_ccmp_hdr(skb, key_id); + } + hdr = mt76_skb_get_hdr(skb); fc = hdr->frame_control; if (ieee80211_is_data_qos(fc)) { @@ -682,16 +792,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) qos_ctl = *ieee80211_get_qos_ctl(hdr); } } else { - status->flag &= ~(RX_FLAG_RADIOTAP_HE | - RX_FLAG_RADIOTAP_HE_MU); status->flag |= RX_FLAG_8023; } - if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) { - mt7915_mac_decode_he_radiotap(skb, status, rxv, mode); - if (status->flag & RX_FLAG_RADIOTAP_HE_MU) - mt7915_mac_decode_he_mu_radiotap(skb, status, rxv); - } + if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) + mt7915_mac_decode_he_radiotap(skb, rxv, mode); if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; @@ -1044,8 +1149,8 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, if (vif) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - omac_idx = mvif->omac_idx; - wmm_idx = mvif->wmm_idx; + omac_idx = mvif->mt76.omac_idx; + wmm_idx = mvif->mt76.wmm_idx; } if (ext_phy && dev->mt76.phy2) @@ -1151,8 +1256,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } } - pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + id = mt76_token_consume(mdev, &t); + if (id < 0) + return id; + pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, false); @@ -1175,16 +1286,9 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (vif) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - txp->bss_idx = mvif->idx; + txp->bss_idx = mvif->mt76.idx; } - t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); - t->skb = tx_info->skb; - - id = mt76_token_consume(mdev, &t); - if (id < 0) - return id; - txp->token = cpu_to_le16(id); if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) txp->rept_wds_wcid = cpu_to_le16(wcid->idx); @@ -1269,15 +1373,16 @@ out: } static void -mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) +mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) { - struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data; + struct mt7915_tx_free *free = (struct mt7915_tx_free *)data; struct mt76_dev *mdev = &dev->mt76; struct mt76_phy *mphy_ext = mdev->phy2; struct mt76_txwi_cache *txwi; struct ieee80211_sta *sta = NULL; LIST_HEAD(free_list); - struct sk_buff *tmp; + struct sk_buff *skb, *tmp; + void *end = data + len; u8 i, count; bool wake = false; @@ -1295,6 +1400,9 
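/*
 * Editor's note: the pad_start logic above probes for the extra 2-byte
 * length field the hardware inserts when header translation fails,
 * allowing for one 802.1Q tag, and only strips it if the value matches
 * the remaining payload size.  Standalone model returning the offset of
 * the suspected field (0 when absent); the caller would then shift the
 * preceding header down by two bytes, as the memmove in the diff does:
 */
#include <stdint.h>
#include <stddef.h>

static size_t find_pad_start(const uint8_t *data, size_t len)
{
	size_t off = 12;			/* EtherType offset */
	uint16_t v;

	if (len < off + 4)
		return 0;
	v = (uint16_t)(data[off] << 8 | data[off + 1]);
	if (v == 0x8100 && len >= off + 8)	/* ETH_P_8021Q: skip tag */
		off += 4;
	v = (uint16_t)(data[off] << 8 | data[off + 1]);
	/* the inserted field must equal the number of bytes after it */
	return ((size_t)v == len - off - 2) ? off : 0;
}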
@@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) * Should avoid accessing WTBL to get Tx airtime, and use it instead. */ count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl)); + if (WARN_ON_ONCE((void *)&free->info[count] > end)) + return; + for (i = 0; i < count; i++) { u32 msdu, info = le32_to_cpu(free->info[i]); @@ -1337,8 +1445,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) mt76_worker_schedule(&dev->mt76.tx_worker); - napi_consume_skb(skb, 1); - list_for_each_entry_safe(skb, tmp, &free_list, list) { skb_list_del_init(skb); napi_consume_skb(skb, 1); @@ -1513,6 +1619,27 @@ out: rcu_read_unlock(); } +bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + __le32 *rxd = (__le32 *)data; + __le32 *end = (__le32 *)&rxd[len / 4]; + enum rx_pkt_type type; + + type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); + switch (type) { + case PKT_TYPE_TXRX_NOTIFY: + mt7915_mac_tx_free(dev, data, len); + return false; + case PKT_TYPE_TXS: + for (rxd += 2; rxd + 8 <= end; rxd += 8) + mt7915_mac_add_txs(dev, rxd); + return false; + default: + return true; + } +} + void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb) { @@ -1525,7 +1652,8 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, switch (type) { case PKT_TYPE_TXRX_NOTIFY: - mt7915_mac_tx_free(dev, skb); + mt7915_mac_tx_free(dev, skb->data, skb->len); + napi_consume_skb(skb, 1); break; case PKT_TYPE_RX_EVENT: mt7915_mcu_rx_event(dev, skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 057ab27b7083..8ac6f59af174 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -203,8 +203,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, is_zero_ether_addr(vif->addr)) phy->monitor_vif = vif; - mvif->idx = ffs(~dev->mt76.vif_mask) - 1; - if (mvif->idx >= MT7915_MAX_INTERFACES) { + mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1; + if (mvif->mt76.idx >= MT7915_MAX_INTERFACES) { ret = -ENOSPC; goto out; } @@ -214,29 +214,27 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, ret = -ENOSPC; goto out; } - mvif->omac_idx = idx; + mvif->mt76.omac_idx = idx; mvif->phy = phy; - mvif->band_idx = ext_phy; + mvif->mt76.band_idx = ext_phy; - if (dev->mt76.phy2) - mvif->wmm_idx = ext_phy * (MT7915_MAX_WMM_SETS / 2) + - mvif->idx % (MT7915_MAX_WMM_SETS / 2); - else - mvif->wmm_idx = mvif->idx % MT7915_MAX_WMM_SETS; + mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP; + if (ext_phy) + mvif->mt76.wmm_idx += 2; ret = mt7915_mcu_add_dev_info(phy, vif, true); if (ret) goto out; - dev->mt76.vif_mask |= BIT(mvif->idx); - phy->omac_mask |= BIT_ULL(mvif->omac_idx); + dev->mt76.vif_mask |= BIT(mvif->mt76.idx); + phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); - idx = MT7915_WTBL_RESERVED - mvif->idx; + idx = MT7915_WTBL_RESERVED - mvif->mt76.idx; INIT_LIST_HEAD(&mvif->sta.rc_list); INIT_LIST_HEAD(&mvif->sta.poll_list); mvif->sta.wcid.idx = idx; - mvif->sta.wcid.ext_phy = mvif->band_idx; + mvif->sta.wcid.ext_phy = mvif->mt76.band_idx; mvif->sta.wcid.hw_key_idx = -1; mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; mt76_packet_id_init(&mvif->sta.wcid); @@ -251,7 +249,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, } if (vif->type != NL80211_IFTYPE_AP && - (!mvif->omac_idx || mvif->omac_idx > 3)) + (!mvif->mt76.omac_idx || 
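/*
 * Editor's note: mt7915_mac_tx_free() above now receives an explicit
 * buffer length and refuses (WARN_ON_ONCE) to walk more free->info[]
 * entries than the buffer can hold -- the count comes from the device and
 * must not be trusted blindly.  Generic shape of that guard:
 */
#include <stdint.h>
#include <stddef.h>

struct tx_free_model {
	uint16_t ctrl;		/* device-reported MSDU count lives here */
	uint16_t rsv;
	uint32_t info[];	/* 'count' entries follow */
};

static int count_fits(const struct tx_free_model *f, size_t len,
		      unsigned int count)
{
	const void *end = (const uint8_t *)f + len;

	/* one past the last entry must stay within the buffer */
	return (const void *)&f->info[count] <= end;
}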
mvif->mt76.omac_idx > 3)) vif->offload_flags = 0; vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR; @@ -288,8 +286,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, rcu_assign_pointer(dev->mt76.wcid[idx], NULL); mutex_lock(&dev->mt76.mutex); - dev->mt76.vif_mask &= ~BIT(mvif->idx); - phy->omac_mask &= ~BIT_ULL(mvif->omac_idx); + dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx); + phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mutex_unlock(&dev->mt76.mutex); spin_lock_bh(&dev->sta_poll_lock); @@ -425,6 +423,28 @@ out: return err; } +static int mt7915_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = mt7915_hw_dev(hw); + int err = -EINVAL; + + mutex_lock(&dev->mt76.mutex); + if (!cfg80211_chandef_valid(&phy->mt76->chandef)) + goto out; + + err = mt76_init_sar_power(hw, sar); + if (err) + goto out; + + err = mt7915_mcu_set_txpower_sku(phy); +out: + mutex_unlock(&dev->mt76.mutex); + + return err; +} + static int mt7915_config(struct ieee80211_hw *hw, u32 changed) { struct mt7915_dev *dev = mt7915_hw_dev(hw); @@ -556,7 +576,7 @@ mt7915_update_bss_color(struct ieee80211_hw *hw, case NL80211_IFTYPE_AP: { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - if (mvif->omac_idx > HW_BSSID_MAX) + if (mvif->mt76.omac_idx > HW_BSSID_MAX) return; fallthrough; } @@ -655,7 +675,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, msta->vif = mvif; msta->wcid.sta = 1; msta->wcid.idx = idx; - msta->wcid.ext_phy = mvif->band_idx; + msta->wcid.ext_phy = mvif->mt76.band_idx; msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; msta->jiffies = jiffies; @@ -838,7 +858,8 @@ u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif) lockdep_assert_held(&dev->mt76.mutex); - n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx; + n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 + : mvif->mt76.omac_idx; /* TSF software read */ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE, MT_LPON_TCR_SW_READ); @@ -878,7 +899,8 @@ mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mutex_lock(&dev->mt76.mutex); - n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx; + n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 + : mvif->mt76.omac_idx; mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]); /* TSF software overwrite */ @@ -904,7 +926,8 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mutex_lock(&dev->mt76.mutex); - n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx; + n = mvif->mt76.omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 + : mvif->mt76.omac_idx; mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]); /* TSF software adjust*/ @@ -1195,7 +1218,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) struct mt76_ethtool_worker_info *wi = wi_data; struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - if (msta->vif->idx != wi->idx) + if (msta->vif->mt76.idx != wi->idx) return; mt76_ethtool_worker(wi, &msta->stats); @@ -1211,7 +1234,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw, struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct mt76_ethtool_worker_info wi = { .data = data, - .idx = mvif->idx, + .idx = mvif->mt76.idx, }; struct mib_stats *mib = &phy->mib; /* See mt7915_ampdu_stat_read_phy, etc */ @@ -1331,6 +1354,7 @@ const struct ieee80211_ops mt7915_ops = { .sw_scan_complete = mt76_sw_scan_complete, .release_buffered_frames = mt76_release_buffered_frames, .get_txpower = mt76_get_txpower, + .set_sar_specs = mt7915_set_sar_specs, .channel_switch_beacon = mt7915_channel_switch_beacon, .get_stats = mt7915_get_stats, .get_et_sset_count = mt7915_get_et_sset_count, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 899957b9d0f1..0911b6f973b5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta) if (ht_cap->ht_supported) mode |= PHY_MODE_GN; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_MODE_AX_24G; } else if (band == NL80211_BAND_5GHZ) { mode |= PHY_MODE_A; @@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta) if (vht_cap->vht_supported) mode |= PHY_MODE_AC; - if (he_cap->has_he) + if (he_cap && he_cap->has_he) mode |= PHY_MODE_AX_5G; } @@ -582,10 +582,10 @@ mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif, struct mt7915_sta *msta, int len) { struct sta_req_hdr hdr = { - .bss_idx = mvif->idx, + .bss_idx = mvif->mt76.idx, .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0, .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0, - .muar_idx = msta && msta->wcid.sta ? mvif->omac_idx : 0xe, + .muar_idx = msta && msta->wcid.sta ? mvif->mt76.omac_idx : 0xe, .is_tlv_append = 1, }; struct sk_buff *skb; @@ -613,7 +613,7 @@ mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta, if (!nskb) { nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL, - MT7915_WTBL_UPDATE_MAX_SIZE); + MT76_CONNAC_WTBL_UPDATE_MAX_SIZE); if (!nskb) return ERR_PTR(-ENOMEM); @@ -725,7 +725,7 @@ mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, bss->network_type = cpu_to_le32(type); bss->bmc_wcid_lo = to_wcid_lo(wlan_idx); bss->bmc_wcid_hi = to_wcid_hi(wlan_idx); - bss->wmm_idx = mvif->wmm_idx; + bss->wmm_idx = mvif->mt76.wmm_idx; bss->active = enable; if (vif->type != NL80211_IFTYPE_MONITOR) { @@ -769,10 +769,11 @@ mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) } omac = (struct bss_info_omac *)tlv; - idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx; + idx = mvif->mt76.omac_idx > EXT_BSSID_START ? 
HW_BSSID_0 + : mvif->mt76.omac_idx; omac->conn_type = cpu_to_le32(type); - omac->omac_idx = mvif->omac_idx; - omac->band_idx = mvif->band_idx; + omac->omac_idx = mvif->mt76.omac_idx; + omac->band_idx = mvif->mt76.band_idx; omac->hw_bss_idx = idx; } @@ -937,7 +938,7 @@ mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif) int ext_bss_idx, tsf_offset; struct tlv *tlv; - ext_bss_idx = mvif->omac_idx - EXT_BSSID_START; + ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START; if (ext_bss_idx < 0) return; @@ -973,7 +974,7 @@ mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif, { struct mt7915_dev *dev = phy->dev; struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - u32 idx = mvif->omac_idx - REPEATER_BSSID_START; + u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START; u32 mask = phy->omac_mask >> 32 & ~BIT(idx); const u8 *addr = vif->addr; struct { @@ -1011,7 +1012,7 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct sk_buff *skb; - if (mvif->omac_idx >= REPEATER_BSSID_START) { + if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) { mt7915_mcu_muar_config(phy, vif, false, enable); mt7915_mcu_muar_config(phy, vif, true, enable); } @@ -1039,8 +1040,8 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, if (vif->bss_conf.he_support) mt7915_mcu_bss_he_tlv(skb, vif, phy); - if (mvif->omac_idx >= EXT_BSSID_START && - mvif->omac_idx < REPEATER_BSSID_START) + if (mvif->mt76.omac_idx >= EXT_BSSID_START && + mvif->mt76.omac_idx < REPEATER_BSSID_START) mt7915_mcu_bss_ext_tlv(skb, mvif); } out: @@ -1204,7 +1205,7 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, msta->wcid.amsdu = false; skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, - MT7915_STA_UPDATE_MAX_SIZE); + MT76_CONNAC_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1223,7 +1224,7 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, return ret; skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, - MT7915_STA_UPDATE_MAX_SIZE); + MT76_CONNAC_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1265,7 +1266,7 @@ mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, if (sta) { memcpy(generic->peer_addr, sta->addr, ETH_ALEN); generic->partial_aid = cpu_to_le16(sta->aid); - generic->muar_idx = mvif->omac_idx; + generic->muar_idx = mvif->mt76.omac_idx; generic->qos = sta->wme; } else { /* use BSSID in station mode */ @@ -1738,7 +1739,8 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, struct wtbl_req_hdr *wtbl_hdr; struct sk_buff *skb; - skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, MT7915_WTBL_UPDATE_MAX_SIZE); + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, + MT76_CONNAC_WTBL_UPDATE_MAX_SIZE); if (!skb) return -ENOMEM; @@ -1752,33 +1754,6 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, true); } -int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - struct wtbl_req_hdr *wtbl_hdr; - struct tlv *sta_wtbl; - struct sk_buff *skb; - - skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, - MT7915_STA_UPDATE_MAX_SIZE); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - - wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, - &skb); - if (IS_ERR(wtbl_hdr)) - return PTR_ERR(wtbl_hdr); - - mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, 
wtbl_hdr); - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD(STA_REC_UPDATE), true); -} - static inline bool mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool bfee) @@ -1954,7 +1929,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct mt7915_phy *phy = - mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy; + mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy; int tx_ant = hweight8(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; @@ -2021,7 +1996,7 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb, { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct mt7915_phy *phy = - mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy; + mvif->mt76.band_idx ? mt7915_ext_phy(dev) : &dev->phy; int tx_ant = hweight8(phy->mt76->chainmask) - 1; struct sta_rec_bfee *bfee; struct tlv *tlv; @@ -2049,6 +2024,21 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb, bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2); } +static enum mcu_mmps_mode +mt7915_mcu_get_mmps_mode(enum ieee80211_smps_mode smps) +{ + switch (smps) { + case IEEE80211_SMPS_OFF: + return MCU_MMPS_DISABLE; + case IEEE80211_SMPS_STATIC: + return MCU_MMPS_STATIC; + case IEEE80211_SMPS_DYNAMIC: + return MCU_MMPS_DYNAMIC; + default: + return MCU_MMPS_DISABLE; + } +} + int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2076,7 +2066,11 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, case RATE_PARAM_FIXED_MCS: case RATE_PARAM_FIXED_GI: case RATE_PARAM_FIXED_HE_LTF: - ra->phy = *phy; + if (phy) + ra->phy = *phy; + break; + case RATE_PARAM_MMPS_UPDATE: + ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode); break; default: break; @@ -2087,6 +2081,39 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, MCU_EXT_CMD(STA_REC_UPDATE), true); } +int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + int ret; + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, + MT76_CONNAC_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + + wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, + &skb); + if (IS_ERR(wtbl_hdr)) + return PTR_ERR(wtbl_hdr); + + mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr); + + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD(STA_REC_UPDATE), true); + if (ret) + return ret; + + return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, NULL, + RATE_PARAM_MMPS_UPDATE); +} + static int mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev, struct ieee80211_vif *vif, @@ -2278,7 +2305,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, int ret; skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, - MT7915_STA_UPDATE_MAX_SIZE); + MT76_CONNAC_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -2323,7 +2350,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, u8 rsv1[8]; } __packed req = { .action = cpu_to_le32(MT_STA_BSS_GROUP), - .val = cpu_to_le32(mvif->idx % 16), + .val = 
cpu_to_le32(mvif->mt76.idx % 16), }; msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; @@ -2345,7 +2372,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, - MT7915_STA_UPDATE_MAX_SIZE); + MT76_CONNAC_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -2389,53 +2416,6 @@ out: MCU_EXT_CMD(STA_REC_UPDATE), true); } -int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, - struct ieee80211_sta *sta, u32 rate) -{ - struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; - struct mt7915_vif *mvif = msta->vif; - struct sta_rec_ra_fixed *ra; - struct sk_buff *skb; - struct tlv *tlv; - int len = sizeof(struct sta_req_hdr) + sizeof(*ra); - - skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); - ra = (struct sta_rec_ra_fixed *)tlv; - - if (!rate) { - ra->field = cpu_to_le32(RATE_PARAM_AUTO); - goto out; - } - - ra->field = cpu_to_le32(RATE_PARAM_FIXED); - ra->phy.type = FIELD_GET(RATE_CFG_PHY_TYPE, rate); - ra->phy.bw = FIELD_GET(RATE_CFG_BW, rate); - ra->phy.nss = FIELD_GET(RATE_CFG_NSS, rate); - ra->phy.mcs = FIELD_GET(RATE_CFG_MCS, rate); - ra->phy.stbc = FIELD_GET(RATE_CFG_STBC, rate); - - if (ra->phy.bw) - ra->phy.ldpc = 7; - else - ra->phy.ldpc = FIELD_GET(RATE_CFG_LDPC, rate) * 7; - - /* HT/VHT - SGI: 1, LGI: 0; HE - SGI: 0, MGI: 1, LGI: 2 */ - if (ra->phy.type > MT_PHY_TYPE_VHT) { - ra->phy.he_ltf = FIELD_GET(RATE_CFG_HE_LTF, rate) * 85; - ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 85; - } else { - ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 15; - } - -out: - return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD(STA_REC_UPDATE), true); -} - int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, struct ieee80211_vif *vif, bool enable) { @@ -2458,8 +2438,8 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, } __packed tlv; } data = { .hdr = { - .omac_idx = mvif->omac_idx, - .dbdc_idx = mvif->band_idx, + .omac_idx = mvif->mt76.omac_idx, + .dbdc_idx = mvif->mt76.band_idx, .tlv_num = cpu_to_le16(1), .is_tlv_append = 1, }, @@ -2467,11 +2447,11 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, .tag = cpu_to_le16(DEV_INFO_ACTIVE), .len = cpu_to_le16(sizeof(struct req_tlv)), .active = enable, - .dbdc_idx = mvif->band_idx, + .dbdc_idx = mvif->mt76.band_idx, }, }; - if (mvif->omac_idx >= REPEATER_BSSID_START) + if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) return mt7915_mcu_muar_config(phy, vif, false, enable); memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN); @@ -2643,7 +2623,7 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, return -EINVAL; } - if (mvif->band_idx) { + if (mvif->mt76.band_idx) { info = IEEE80211_SKB_CB(skb); info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; } @@ -2967,7 +2947,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev) if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, FIELD_PREP(MT_TOP_MISC_FW_STATE, - FW_STATE_WACPU_RDY), 1000)) { + FW_STATE_RDY), 1000)) { dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); return -EIO; } @@ -3014,6 +2994,47 @@ int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level) sizeof(data), false); } +int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enabled) +{ + struct { + __le32 cmd; + u8 enable; + } data = { + .cmd = cpu_to_le32(MURU_SET_TXC_TX_STATS_EN), + .enable = enabled, + }; + + return 
mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &data, + sizeof(data), false); +} + +int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms) +{ + struct mt7915_dev *dev = phy->dev; + struct sk_buff *skb; + struct mt7915_mcu_muru_stats *mu_stats = + (struct mt7915_mcu_muru_stats *)ms; + int ret; + + struct { + __le32 cmd; + u8 band_idx; + } req = { + .cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS), + .band_idx = phy != &dev->phy, + }; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + memcpy(mu_stats, skb->data, sizeof(struct mt7915_mcu_muru_stats)); + dev_kfree_skb(skb); + + return 0; +} + static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled) { struct { @@ -3264,7 +3285,7 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif) struct edca *e = &req.edca[ac]; e->set = WMM_PARAM_SET; - e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS; + e->queue = ac + mvif->mt76.wmm_idx * MT7915_MAX_WMM_SETS; e->aifs = q->aifs; e->txop = cpu_to_le16(q->txop); @@ -3579,6 +3600,30 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) return 0; } +int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num) +{ + struct { + u8 _rsv; + u8 version; + u8 die_idx; + u8 _rsv2; + } __packed req = { + .version = 1, + }; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_FREE_BLOCK), &req, + sizeof(req), true, &skb); + if (ret) + return ret; + + *block_num = *(u8 *)skb->data; + dev_kfree_skb(skb); + + return 0; +} + static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx, u8 *data, u32 len, int cmd) { @@ -3854,11 +3899,11 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) struct mt76_power_limits limits_array; s8 *la = (s8 *)&limits_array; int i, idx, n_chains = hweight8(mphy->antenna_mask); - int tx_power; - - tx_power = hw->conf.power_level * 2 - - mt76_tx_power_nss_delta(n_chains); + int tx_power = hw->conf.power_level * 2; + tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, + tx_power); + tx_power -= mt76_tx_power_nss_delta(n_chains); tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, &limits_array, tx_power); mphy->txpower_cur = tx_power; @@ -4045,7 +4090,7 @@ int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, } __packed req = { .action = MT_SPR_ENABLE, .arg_num = 1, - .band_idx = mvif->band_idx, + .band_idx = mvif->mt76.band_idx, .val = cpu_to_le32(enable), }; @@ -4066,7 +4111,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, __le16 wcid; } __packed req = { .category = MCU_PHY_STATE_CONTENTION_RX_RATE, - .band = mvif->band_idx, + .band = mvif->mt76.band_idx, .wcid = cpu_to_le16(msta->wcid.idx), }; struct ieee80211_supported_band *sband; @@ -4206,11 +4251,11 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev, } __packed req = { .tbl_idx = flow->table_id, .cmd = cmd, - .own_mac_idx = mvif->omac_idx, + .own_mac_idx = mvif->mt76.omac_idx, .flowid = flow->id, .peer_id = cpu_to_le16(flow->wcid), .duration = flow->duration, - .bss_idx = mvif->idx, + .bss_idx = mvif->mt76.idx, .start_tsf = cpu_to_le64(flow->tsf), .mantissa = flow->mantissa, .exponent = flow->exp, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 1f5a64ba9b59..92268e696931 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h 
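The two query helpers added above, mt7915_mcu_muru_debug_get() and mt7915_mcu_get_eeprom_free_block(), follow the same synchronous MCU request/response pattern: build a small __packed request, let mt76_mcu_send_and_get_msg() block until the matching response arrives, copy the payload out of the returned skb, and free it. A minimal annotated sketch of that pattern, modelled on the free-block query (illustration only, not a new firmware interface):

static int example_efuse_free_block(struct mt7915_dev *dev, u8 *block_num)
{
	struct {
		u8 _rsv;
		u8 version;
		u8 die_idx;
		u8 _rsv2;
	} __packed req = {
		.version = 1,	/* request format revision */
	};
	struct sk_buff *skb;
	int ret;

	/* 'true' asks for the response skb; the call blocks until the
	 * event matching this message's sequence number arrives or the
	 * MCU timeout expires.
	 */
	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
					MCU_EXT_QUERY(EFUSE_FREE_BLOCK),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	/* mt7915_mcu_parse_response() has already stripped the RXD
	 * header, so the payload starts at skb->data.
	 */
	*block_num = *(u8 *)skb->data;
	dev_kfree_skb(skb);

	return 0;
}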
@@ -4,6 +4,8 @@ #ifndef __MT7915_MCU_H #define __MT7915_MCU_H +#include "../mt76_connac_mcu.h" + struct mt7915_mcu_txd { __le32 txd[8]; @@ -23,29 +25,6 @@ struct mt7915_mcu_txd { u32 reserved[5]; } __packed __aligned(4); -/* event table */ -enum { - MCU_EVENT_TARGET_ADDRESS_LEN = 0x01, - MCU_EVENT_FW_START = 0x01, - MCU_EVENT_GENERIC = 0x01, - MCU_EVENT_ACCESS_REG = 0x02, - MCU_EVENT_MT_PATCH_SEM = 0x04, - MCU_EVENT_CH_PRIVILEGE = 0x18, - MCU_EVENT_EXT = 0xed, - MCU_EVENT_RESTART_DL = 0xef, -}; - -/* ext event table */ -enum { - MCU_EXT_EVENT_PS_SYNC = 0x5, - MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13, - MCU_EXT_EVENT_THERMAL_PROTECT = 0x22, - MCU_EXT_EVENT_ASSERT_DUMP = 0x23, - MCU_EXT_EVENT_RDD_REPORT = 0x3a, - MCU_EXT_EVENT_CSA_NOTIFY = 0x4f, - MCU_EXT_EVENT_BCC_NOTIFY = 0x75, -}; - enum { MCU_ATE_SET_TRX = 0x1, MCU_ATE_SET_FREQ_OFFSET = 0xa, @@ -206,6 +185,44 @@ struct mt7915_mcu_tx { struct edca edca[IEEE80211_NUM_ACS]; } __packed; +struct mt7915_mcu_muru_stats { + __le32 event_id; + struct { + __le32 cck_cnt; + __le32 ofdm_cnt; + __le32 htmix_cnt; + __le32 htgf_cnt; + __le32 vht_su_cnt; + __le32 vht_2mu_cnt; + __le32 vht_3mu_cnt; + __le32 vht_4mu_cnt; + __le32 he_su_cnt; + __le32 he_ext_su_cnt; + __le32 he_2ru_cnt; + __le32 he_2mu_cnt; + __le32 he_3ru_cnt; + __le32 he_3mu_cnt; + __le32 he_4ru_cnt; + __le32 he_4mu_cnt; + __le32 he_5to8ru_cnt; + __le32 he_9to16ru_cnt; + __le32 he_gtr16ru_cnt; + } dl; + + struct { + __le32 hetrig_su_cnt; + __le32 hetrig_2ru_cnt; + __le32 hetrig_3ru_cnt; + __le32 hetrig_4ru_cnt; + __le32 hetrig_5to8ru_cnt; + __le32 hetrig_9to16ru_cnt; + __le32 hetrig_gtr16ru_cnt; + __le32 hetrig_2mu_cnt; + __le32 hetrig_3mu_cnt; + __le32 hetrig_4mu_cnt; + } ul; +}; + #define WMM_AIFS_SET BIT(0) #define WMM_CW_MIN_SET BIT(1) #define WMM_CW_MAX_SET BIT(2) @@ -216,83 +233,11 @@ struct mt7915_mcu_tx { #define MCU_PKT_ID 0xa0 enum { - MCU_Q_QUERY, - MCU_Q_SET, - MCU_Q_RESERVED, - MCU_Q_NA -}; - -enum { - MCU_S2D_H2N, - MCU_S2D_C2N, - MCU_S2D_H2C, - MCU_S2D_H2CN -}; - -enum { MCU_FW_LOG_WM, MCU_FW_LOG_WA, MCU_FW_LOG_TO_HOST, }; -#define __MCU_CMD_FIELD_ID GENMASK(7, 0) -#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8) -#define __MCU_CMD_FIELD_QUERY BIT(16) -#define __MCU_CMD_FIELD_WA BIT(17) - -enum { - MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01, - MCU_CMD_FW_START_REQ = 0x02, - MCU_CMD_INIT_ACCESS_REG = 0x3, - MCU_CMD_NIC_POWER_CTRL = 0x4, - MCU_CMD_PATCH_START_REQ = 0x05, - MCU_CMD_PATCH_FINISH_REQ = 0x07, - MCU_CMD_PATCH_SEM_CONTROL = 0x10, - MCU_CMD_WA_PARAM = 0xC4, - MCU_CMD_EXT_CID = 0xED, - MCU_CMD_FW_SCATTER = 0xEE, - MCU_CMD_RESTART_DL_REQ = 0xEF, -}; - -enum { - MCU_EXT_CMD_EFUSE_ACCESS = 0x01, - MCU_EXT_CMD_RF_TEST = 0x04, - MCU_EXT_CMD_PM_STATE_CTRL = 0x07, - MCU_EXT_CMD_CHANNEL_SWITCH = 0x08, - MCU_EXT_CMD_FW_LOG_2_HOST = 0x13, - MCU_EXT_CMD_TXBF_ACTION = 0x1e, - MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21, - MCU_EXT_CMD_THERMAL_PROT = 0x23, - MCU_EXT_CMD_STA_REC_UPDATE = 0x25, - MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26, - MCU_EXT_CMD_EDCA_UPDATE = 0x27, - MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A, - MCU_EXT_CMD_THERMAL_CTRL = 0x2c, - MCU_EXT_CMD_WTBL_UPDATE = 0x32, - MCU_EXT_CMD_SET_DRR_CTRL = 0x36, - MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, - MCU_EXT_CMD_ATE_CTRL = 0x3d, - MCU_EXT_CMD_PROTECT_CTRL = 0x3e, - MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, - MCU_EXT_CMD_RX_HDR_TRANS = 0x47, - MCU_EXT_CMD_MUAR_UPDATE = 0x48, - MCU_EXT_CMD_RX_AIRTIME_CTRL = 0x4a, - MCU_EXT_CMD_SET_RX_PATH = 0x4e, - MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58, - MCU_EXT_CMD_GET_MIB_INFO = 0x5a, - MCU_EXT_CMD_MWDS_SUPPORT = 0x80, - 
MCU_EXT_CMD_SET_SER_TRIGGER = 0x81, - MCU_EXT_CMD_SCS_CTRL = 0x82, - MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94, - MCU_EXT_CMD_FW_DBG_CTRL = 0x95, - MCU_EXT_CMD_SET_RDD_TH = 0x9d, - MCU_EXT_CMD_MURU_CTRL = 0x9f, - MCU_EXT_CMD_SET_SPR = 0xa8, - MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab, - MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac, - MCU_EXT_CMD_PHY_STAT_INFO = 0xad, -}; - enum { MCU_TWT_AGRT_ADD, MCU_TWT_AGRT_MODIFY, @@ -314,55 +259,11 @@ enum { MCU_WA_PARAM_RED = 0x0e, }; -#define MCU_CMD(_t) FIELD_PREP(__MCU_CMD_FIELD_ID, MCU_CMD_##_t) -#define MCU_EXT_CMD(_t) (MCU_CMD(EXT_CID) | \ - FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \ - MCU_EXT_CMD_##_t)) -#define MCU_EXT_QUERY(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_QUERY) - -#define MCU_WA_CMD(_t) (MCU_CMD(_t) | __MCU_CMD_FIELD_WA) -#define MCU_WA_EXT_CMD(_t) (MCU_EXT_CMD(_t) | __MCU_CMD_FIELD_WA) -#define MCU_WA_PARAM_CMD(_t) (MCU_WA_CMD(WA_PARAM) | \ - FIELD_PREP(__MCU_CMD_FIELD_EXT_ID, \ - MCU_WA_PARAM_CMD_##_t)) - -enum { - PATCH_SEM_RELEASE, - PATCH_SEM_GET -}; - -enum { - PATCH_NOT_DL_SEM_FAIL, - PATCH_IS_DL, - PATCH_NOT_DL_SEM_SUCCESS, - PATCH_REL_SEM_SUCCESS -}; - -enum { - FW_STATE_INITIAL, - FW_STATE_FW_DOWNLOAD, - FW_STATE_NORMAL_OPERATION, - FW_STATE_NORMAL_TRX, - FW_STATE_WACPU_RDY = 7 -}; - -enum { - EE_MODE_EFUSE, - EE_MODE_BUFFER, -}; - -enum { - EE_FORMAT_BIN, - EE_FORMAT_WHOLE, - EE_FORMAT_MULTIPLE, -}; - -enum { - MCU_PHY_STATE_TX_RATE, - MCU_PHY_STATE_RX_RATE, - MCU_PHY_STATE_RSSI, - MCU_PHY_STATE_CONTENTION_RX_RATE, - MCU_PHY_STATE_OFDMLQ_CNINFO, +enum mcu_mmps_mode { + MCU_MMPS_STATIC, + MCU_MMPS_DYNAMIC, + MCU_MMPS_RSV, + MCU_MMPS_DISABLE, }; #define STA_TYPE_STA BIT(0) @@ -389,11 +290,6 @@ enum { #define CONN_STATE_PORT_SECURE 2 enum { - DEV_INFO_ACTIVE, - DEV_INFO_MAX_NUM -}; - -enum { SCS_SEND_DATA, SCS_SET_MANUAL_PD_TH, SCS_CONFIG, @@ -403,75 +299,6 @@ enum { SCS_GET_GLO_ADDR_EVENT, }; -enum { - CMD_CBW_20MHZ = IEEE80211_STA_RX_BW_20, - CMD_CBW_40MHZ = IEEE80211_STA_RX_BW_40, - CMD_CBW_80MHZ = IEEE80211_STA_RX_BW_80, - CMD_CBW_160MHZ = IEEE80211_STA_RX_BW_160, - CMD_CBW_10MHZ, - CMD_CBW_5MHZ, - CMD_CBW_8080MHZ, - - CMD_HE_MCS_BW80 = 0, - CMD_HE_MCS_BW160, - CMD_HE_MCS_BW8080, - CMD_HE_MCS_BW_NUM -}; - -struct tlv { - __le16 tag; - __le16 len; -} __packed; - -struct bss_info_omac { - __le16 tag; - __le16 len; - u8 hw_bss_idx; - u8 omac_idx; - u8 band_idx; - u8 rsv0; - __le32 conn_type; - u32 rsv1; -} __packed; - -struct bss_info_basic { - __le16 tag; - __le16 len; - __le32 network_type; - u8 active; - u8 rsv0; - __le16 bcn_interval; - u8 bssid[ETH_ALEN]; - u8 wmm_idx; - u8 dtim_period; - u8 bmc_wcid_lo; - u8 cipher; - u8 phy_mode; - u8 max_bssid; /* max BSSID. 
range: 1 ~ 8, 0: MBSSID disabled */ - u8 non_tx_bssid;/* non-transmitted BSSID, 0: transmitted BSSID */ - u8 bmc_wcid_hi; /* high Byte and version */ - u8 rsv[2]; -} __packed; - -struct bss_info_rf_ch { - __le16 tag; - __le16 len; - u8 pri_ch; - u8 center_ch0; - u8 center_ch1; - u8 bw; - u8 he_ru26_block; /* 1: don't send HETB in RU26, 0: allow */ - u8 he_all_disable; /* 1: disallow all HETB, 0: allow */ - u8 rsv[2]; -} __packed; - -struct bss_info_ext_bss { - __le16 tag; - __le16 len; - __le32 mbss_tsf_offset; /* in unit of us */ - u8 rsv[8]; -} __packed; - struct bss_info_bmc_rate { __le16 tag; __le16 len; @@ -581,385 +408,8 @@ enum { }; enum { - BSS_INFO_OMAC, - BSS_INFO_BASIC, - BSS_INFO_RF_CH, /* optional, for BT/LTE coex */ - BSS_INFO_PM, /* sta only */ - BSS_INFO_UAPSD, /* sta only */ - BSS_INFO_ROAM_DETECT, /* obsoleted */ - BSS_INFO_LQ_RM, /* obsoleted */ - BSS_INFO_EXT_BSS, - BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */ - BSS_INFO_SYNC_MODE, /* obsoleted */ - BSS_INFO_RA, - BSS_INFO_HW_AMSDU, - BSS_INFO_BSS_COLOR, - BSS_INFO_HE_BASIC, - BSS_INFO_PROTECT_INFO, - BSS_INFO_OFFLOAD, - BSS_INFO_11V_MBSSID, - BSS_INFO_MAX_NUM -}; - -enum { - WTBL_RESET_AND_SET = 1, - WTBL_SET, - WTBL_QUERY, - WTBL_RESET_ALL -}; - -struct wtbl_req_hdr { - u8 wlan_idx_lo; - u8 operation; - __le16 tlv_num; - u8 wlan_idx_hi; - u8 rsv[3]; -} __packed; - -struct wtbl_generic { - __le16 tag; - __le16 len; - u8 peer_addr[ETH_ALEN]; - u8 muar_idx; - u8 skip_tx; - u8 cf_ack; - u8 qos; - u8 mesh; - u8 adm; - __le16 partial_aid; - u8 baf_en; - u8 aad_om; -} __packed; - -struct wtbl_rx { - __le16 tag; - __le16 len; - u8 rcid; - u8 rca1; - u8 rca2; - u8 rv; - u8 rsv[4]; -} __packed; - -struct wtbl_ht { - __le16 tag; - __le16 len; - u8 ht; - u8 ldpc; - u8 af; - u8 mm; - u8 rsv[4]; -} __packed; - -struct wtbl_vht { - __le16 tag; - __le16 len; - u8 ldpc; - u8 dyn_bw; - u8 vht; - u8 txop_ps; - u8 rsv[4]; -} __packed; - -struct wtbl_hdr_trans { - __le16 tag; - __le16 len; - u8 to_ds; - u8 from_ds; - u8 no_rx_trans; - u8 _rsv; -}; - -enum { - MT_BA_TYPE_INVALID, - MT_BA_TYPE_ORIGINATOR, - MT_BA_TYPE_RECIPIENT -}; - -enum { - RST_BA_MAC_TID_MATCH, - RST_BA_MAC_MATCH, - RST_BA_NO_MATCH -}; - -struct wtbl_ba { - __le16 tag; - __le16 len; - /* common */ - u8 tid; - u8 ba_type; - u8 rsv0[2]; - /* originator only */ - __le16 sn; - u8 ba_en; - u8 ba_winsize_idx; - /* originator & recipient */ - __le16 ba_winsize; - /* recipient only */ - u8 peer_addr[ETH_ALEN]; - u8 rst_ba_tid; - u8 rst_ba_sel; - u8 rst_ba_sb; - u8 band_idx; - u8 rsv1[4]; -} __packed; - -struct wtbl_smps { - __le16 tag; - __le16 len; - u8 smps; - u8 rsv[3]; -} __packed; - -enum { - WTBL_GENERIC, - WTBL_RX, - WTBL_HT, - WTBL_VHT, - WTBL_PEER_PS, /* not used */ - WTBL_TX_PS, - WTBL_HDR_TRANS, - WTBL_SEC_KEY, - WTBL_BA, - WTBL_RDG, /* obsoleted */ - WTBL_PROTECT, /* not used */ - WTBL_CLEAR, /* not used */ - WTBL_BF, - WTBL_SMPS, - WTBL_RAW_DATA, /* debug only */ - WTBL_PN, - WTBL_SPE, - WTBL_MAX_NUM -}; - -struct sta_ntlv_hdr { - u8 rsv[2]; - __le16 tlv_num; -} __packed; - -struct sta_req_hdr { - u8 bss_idx; - u8 wlan_idx_lo; - __le16 tlv_num; - u8 is_tlv_append; - u8 muar_idx; - u8 wlan_idx_hi; - u8 rsv; -} __packed; - -struct sta_rec_basic { - __le16 tag; - __le16 len; - __le32 conn_type; - u8 conn_state; - u8 qos; - __le16 aid; - u8 peer_addr[ETH_ALEN]; - __le16 extra_info; -} __packed; - -struct sta_rec_ht { - __le16 tag; - __le16 len; - __le16 ht_cap; - u16 rsv; -} __packed; - -struct sta_rec_vht { - __le16 tag; - __le16 len; - __le32 vht_cap; - 
__le16 vht_rx_mcs_map; - __le16 vht_tx_mcs_map; - u8 rts_bw_sig; - u8 rsv[3]; -} __packed; - -struct sta_rec_uapsd { - __le16 tag; - __le16 len; - u8 dac_map; - u8 tac_map; - u8 max_sp; - u8 rsv0; - __le16 listen_interval; - u8 rsv1[2]; -} __packed; - -struct sta_rec_muru { - __le16 tag; - __le16 len; - - struct { - bool ofdma_dl_en; - bool ofdma_ul_en; - bool mimo_dl_en; - bool mimo_ul_en; - u8 rsv[4]; - } cfg; - - struct { - u8 punc_pream_rx; - bool he_20m_in_40m_2g; - bool he_20m_in_160m; - bool he_80m_in_160m; - bool lt16_sigb; - bool rx_su_comp_sigb; - bool rx_su_non_comp_sigb; - u8 rsv; - } ofdma_dl; - - struct { - u8 t_frame_dur; - u8 mu_cascading; - u8 uo_ra; - u8 he_2x996_tone; - u8 rx_t_frame_11ac; - u8 rsv[3]; - } ofdma_ul; - - struct { - bool vht_mu_bfee; - bool partial_bw_dl_mimo; - u8 rsv[2]; - } mimo_dl; - - struct { - bool full_ul_mimo; - bool partial_ul_mimo; - u8 rsv[2]; - } mimo_ul; -} __packed; - -struct sta_rec_he { - __le16 tag; - __le16 len; - - __le32 he_cap; - - u8 t_frame_dur; - u8 max_ampdu_exp; - u8 bw_set; - u8 device_class; - u8 dcm_tx_mode; - u8 dcm_tx_max_nss; - u8 dcm_rx_mode; - u8 dcm_rx_max_nss; - u8 dcm_max_ru; - u8 punc_pream_rx; - u8 pkt_ext; - u8 rsv1; - - __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM]; - - u8 rsv2[2]; -} __packed; - -struct sta_rec_ba { - __le16 tag; - __le16 len; - u8 tid; - u8 ba_type; - u8 amsdu; - u8 ba_en; - __le16 ssn; - __le16 winsize; -} __packed; - -struct sta_rec_amsdu { - __le16 tag; - __le16 len; - u8 max_amsdu_num; - u8 max_mpdu_size; - u8 amsdu_en; - u8 rsv; -} __packed; - -struct sec_key { - u8 cipher_id; - u8 cipher_len; - u8 key_id; - u8 key_len; - u8 key[32]; -} __packed; - -struct sta_rec_sec { - __le16 tag; - __le16 len; - u8 add; - u8 n_cipher; - u8 rsv[2]; - - struct sec_key key[2]; -} __packed; - -struct sta_phy { - u8 type; - u8 flag; - u8 stbc; - u8 sgi; - u8 bw; - u8 ldpc; - u8 mcs; - u8 nss; - u8 he_ltf; -}; - -struct sta_rec_ra { - __le16 tag; - __le16 len; - - u8 valid; - u8 auto_rate; - u8 phy_mode; - u8 channel; - u8 bw; - u8 disable_cck; - u8 ht_mcs32; - u8 ht_gf; - u8 ht_mcs[4]; - u8 mmps_mode; - u8 gband_256; - u8 af; - u8 auth_wapi_mode; - u8 rate_len; - - u8 supp_mode; - u8 supp_cck_rate; - u8 supp_ofdm_rate; - __le32 supp_ht_mcs; - __le16 supp_vht_mcs[4]; - - u8 op_mode; - u8 op_vht_chan_width; - u8 op_vht_rx_nss; - u8 op_vht_rx_nss_type; - - __le32 sta_cap; - - struct sta_phy phy; -} __packed; - -struct sta_rec_ra_fixed { - __le16 tag; - __le16 len; - - __le32 field; - u8 op_mode; - u8 op_vht_chan_width; - u8 op_vht_rx_nss; - u8 op_vht_rx_nss_type; - - struct sta_phy phy; - - u8 spe_en; - u8 short_preamble; - u8 is_5g; - u8 mmps_mode; -} __packed; - -enum { RATE_PARAM_FIXED = 3, + RATE_PARAM_MMPS_UPDATE = 5, RATE_PARAM_FIXED_HE_LTF = 7, RATE_PARAM_FIXED_MCS, RATE_PARAM_FIXED_GI = 11, @@ -975,120 +425,6 @@ enum { #define RATE_CFG_PHY_TYPE GENMASK(27, 24) #define RATE_CFG_HE_LTF GENMASK(31, 28) -struct sta_rec_bf { - __le16 tag; - __le16 len; - - __le16 pfmu; /* 0xffff: no access right for PFMU */ - bool su_mu; /* 0: SU, 1: MU */ - u8 bf_cap; /* 0: iBF, 1: eBF */ - u8 sounding_phy; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT */ - u8 ndpa_rate; - u8 ndp_rate; - u8 rept_poll_rate; - u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... 
*/ - u8 ncol; - u8 nrow; - u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */ - - u8 mem_total; - u8 mem_20m; - struct { - u8 row; - u8 col: 6, row_msb: 2; - } mem[4]; - - __le16 smart_ant; - u8 se_idx; - u8 auto_sounding; /* b7: low traffic indicator - * b6: Stop sounding for this entry - * b5 ~ b0: postpone sounding - */ - u8 ibf_timeout; - u8 ibf_dbw; - u8 ibf_ncol; - u8 ibf_nrow; - u8 nrow_bw160; - u8 ncol_bw160; - u8 ru_start_idx; - u8 ru_end_idx; - - bool trigger_su; - bool trigger_mu; - bool ng16_su; - bool ng16_mu; - bool codebook42_su; - bool codebook75_mu; - - u8 he_ltf; - u8 rsv[3]; -} __packed; - -struct sta_rec_bfee { - __le16 tag; - __le16 len; - bool fb_identity_matrix; /* 1: feedback identity matrix */ - bool ignore_feedback; /* 1: ignore */ - u8 rsv[2]; -} __packed; - -enum { - STA_REC_BASIC, - STA_REC_RA, - STA_REC_RA_CMM_INFO, - STA_REC_RA_UPDATE, - STA_REC_BF, - STA_REC_AMSDU, - STA_REC_BA, - STA_REC_RED, /* not used */ - STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */ - STA_REC_HT, - STA_REC_VHT, - STA_REC_APPS, - STA_REC_KEY, - STA_REC_WTBL, - STA_REC_HE, - STA_REC_HW_AMSDU, - STA_REC_WTBL_AADOM, - STA_REC_KEY_V2, - STA_REC_MURU, - STA_REC_MUEDCA, - STA_REC_BFEE, - STA_REC_MAX_NUM -}; - -enum mcu_cipher_type { - MCU_CIPHER_NONE = 0, - MCU_CIPHER_WEP40, - MCU_CIPHER_WEP104, - MCU_CIPHER_WEP128, - MCU_CIPHER_TKIP, - MCU_CIPHER_AES_CCMP, - MCU_CIPHER_CCMP_256, - MCU_CIPHER_GCMP, - MCU_CIPHER_GCMP_256, - MCU_CIPHER_WAPI, - MCU_CIPHER_BIP_CMAC_128, -}; - -enum { - CH_SWITCH_NORMAL = 0, - CH_SWITCH_SCAN = 3, - CH_SWITCH_MCC = 4, - CH_SWITCH_DFS = 5, - CH_SWITCH_BACKGROUND_SCAN_START = 6, - CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7, - CH_SWITCH_BACKGROUND_SCAN_STOP = 8, - CH_SWITCH_SCAN_BYPASS_DPD = 9 -}; - -enum { - THERMAL_SENSOR_TEMP_QUERY, - THERMAL_SENSOR_MANUAL_CTRL, - THERMAL_SENSOR_INFO_QUERY, - THERMAL_SENSOR_TASK_CTRL, -}; - enum { THERMAL_PROTECT_PARAMETER_CTRL, THERMAL_PROTECT_BASIC_INFO, @@ -1116,28 +452,11 @@ enum { MURU_PLATFORM_TYPE_PERF_LEVEL_2, }; -#define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \ - sizeof(struct wtbl_generic) + \ - sizeof(struct wtbl_rx) + \ - sizeof(struct wtbl_ht) + \ - sizeof(struct wtbl_vht) + \ - sizeof(struct wtbl_hdr_trans) +\ - sizeof(struct wtbl_ba) + \ - sizeof(struct wtbl_smps)) - -#define MT7915_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ - sizeof(struct sta_rec_basic) + \ - sizeof(struct sta_rec_bf) + \ - sizeof(struct sta_rec_ht) + \ - sizeof(struct sta_rec_he) + \ - sizeof(struct sta_rec_ba) + \ - sizeof(struct sta_rec_vht) + \ - sizeof(struct sta_rec_uapsd) + \ - sizeof(struct sta_rec_amsdu) + \ - sizeof(struct sta_rec_muru) + \ - sizeof(struct sta_rec_bfee) + \ - sizeof(struct tlv) + \ - MT7915_WTBL_UPDATE_MAX_SIZE) +/* tx cmd tx statistics */ +enum { + MURU_SET_TXC_TX_STATS_EN = 150, + MURU_GET_TXC_TX_STATS = 151, +}; #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct bss_info_omac) + \ @@ -1154,62 +473,4 @@ enum { sizeof(struct bss_info_bcn_mbss) + \ sizeof(struct bss_info_bcn_cont)) -#define PHY_MODE_A BIT(0) -#define PHY_MODE_B BIT(1) -#define PHY_MODE_G BIT(2) -#define PHY_MODE_GN BIT(3) -#define PHY_MODE_AN BIT(4) -#define PHY_MODE_AC BIT(5) -#define PHY_MODE_AX_24G BIT(6) -#define PHY_MODE_AX_5G BIT(7) -#define PHY_MODE_AX_6G BIT(8) - -#define MODE_CCK BIT(0) -#define MODE_OFDM BIT(1) -#define MODE_HT BIT(2) -#define MODE_VHT BIT(3) -#define MODE_HE BIT(4) - -#define STA_CAP_WMM BIT(0) -#define STA_CAP_SGI_20 BIT(4) -#define STA_CAP_SGI_40 BIT(5) -#define 
STA_CAP_TX_STBC BIT(6) -#define STA_CAP_RX_STBC BIT(7) -#define STA_CAP_VHT_SGI_80 BIT(16) -#define STA_CAP_VHT_SGI_160 BIT(17) -#define STA_CAP_VHT_TX_STBC BIT(18) -#define STA_CAP_VHT_RX_STBC BIT(19) -#define STA_CAP_VHT_LDPC BIT(23) -#define STA_CAP_LDPC BIT(24) -#define STA_CAP_HT BIT(26) -#define STA_CAP_VHT BIT(27) -#define STA_CAP_HE BIT(28) - -/* HE MAC */ -#define STA_REC_HE_CAP_HTC BIT(0) -#define STA_REC_HE_CAP_BQR BIT(1) -#define STA_REC_HE_CAP_BSR BIT(2) -#define STA_REC_HE_CAP_OM BIT(3) -#define STA_REC_HE_CAP_AMSDU_IN_AMPDU BIT(4) -/* HE PHY */ -#define STA_REC_HE_CAP_DUAL_BAND BIT(5) -#define STA_REC_HE_CAP_LDPC BIT(6) -#define STA_REC_HE_CAP_TRIG_CQI_FK BIT(7) -#define STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE BIT(8) -/* STBC */ -#define STA_REC_HE_CAP_LE_EQ_80M_TX_STBC BIT(9) -#define STA_REC_HE_CAP_LE_EQ_80M_RX_STBC BIT(10) -#define STA_REC_HE_CAP_GT_80M_TX_STBC BIT(11) -#define STA_REC_HE_CAP_GT_80M_RX_STBC BIT(12) -/* GI */ -#define STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI BIT(13) -#define STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI BIT(14) -#define STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI BIT(15) -#define STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI BIT(16) -#define STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI BIT(17) -/* 242 TONE */ -#define STA_REC_HE_CAP_BW20_RU242_SUPPORT BIT(18) -#define STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242 BIT(19) -#define STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242 BIT(20) - #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index e69b4c8974ee..42d887383e8d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -30,6 +30,9 @@ #define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin" #define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin" +#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin" +#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin" + #define MT7915_EEPROM_SIZE 3584 #define MT7915_EEPROM_BLOCK_SIZE 16 #define MT7915_TOKEN_SIZE 8192 @@ -121,10 +124,7 @@ struct mt7915_vif_cap { }; struct mt7915_vif { - u16 idx; - u8 omac_idx; - u8 band_idx; - u8 wmm_idx; + struct mt76_vif mt76; /* must be first */ struct mt7915_vif_cap cap; struct mt7915_sta sta; @@ -270,6 +270,7 @@ struct mt7915_dev { bool dbdc_support; bool flash_mode; + bool muru_debug; bool ibf; u8 fw_debug_wm; u8 fw_debug_wa; @@ -283,20 +284,6 @@ struct mt7915_dev { }; enum { - HW_BSSID_0 = 0x0, - HW_BSSID_1, - HW_BSSID_2, - HW_BSSID_3, - HW_BSSID_MAX = HW_BSSID_3, - EXT_BSSID_START = 0x10, - EXT_BSSID_1, - EXT_BSSID_15 = 0x1f, - EXT_BSSID_MAX = EXT_BSSID_15, - REPEATER_BSSID_START = 0x20, - REPEATER_BSSID_MAX = 0x3f, -}; - -enum { MT_CTX0, MT_HIF0 = 0x0, @@ -423,6 +410,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, void *data, u32 field); int mt7915_mcu_set_eeprom(struct mt7915_dev *dev); int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset); +int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num); int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, bool hdr_trans); int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode, @@ -515,6 +503,7 @@ void mt7915_tx_token_put(struct mt7915_dev *dev); int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc); void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb); +bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len); void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta 
*sta, bool ps); void mt7915_stats_work(struct work_struct *work); int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force); @@ -522,6 +511,8 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy); void mt7915_set_stream_he_caps(struct mt7915_phy *phy); void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy); void mt7915_update_channel(struct mt76_phy *mphy); +int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable); +int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms); int mt7915_init_debugfs(struct mt7915_phy *phy); #ifdef CONFIG_MAC80211_DEBUGFS void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 0af4cdb897b7..8130ea43971f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -230,6 +230,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, .tx_prepare_skb = mt7915_tx_prepare_skb, .tx_complete_skb = mt7915_tx_complete_skb, .rx_skb = mt7915_queue_rx_skb, + .rx_check = mt7915_rx_check, .rx_poll_complete = mt7915_rx_poll_complete, .sta_ps = mt7915_sta_ps, .sta_add = mt7915_mac_sta_add, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c index 89aae323d29e..af80c2cf8c83 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c @@ -361,16 +361,15 @@ mt7915_tm_reg_backup_restore(struct mt7915_phy *phy) return; } - if (b) - return; - - b = devm_kzalloc(dev->mt76.dev, 4 * n_regs, GFP_KERNEL); - if (!b) - return; + if (!b) { + b = devm_kzalloc(dev->mt76.dev, 4 * n_regs, GFP_KERNEL); + if (!b) + return; - phy->test.reg_backup = b; - for (i = 0; i < n_regs; i++) - b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]); + phy->test.reg_backup = b; + for (i = 0; i < n_regs; i++) + b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]); + } mt76_clear(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_MM_PROT | MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT | diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c index 7cdfdf83529f..86fd7292b229 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c @@ -276,7 +276,7 @@ mt7921_pm_set(void *data, u64 val) struct mt7921_dev *dev = data; struct mt76_connac_pm *pm = &dev->pm; - mt7921_mutex_acquire(dev); + mutex_lock(&dev->mt76.mutex); if (val == pm->enable) goto out; @@ -285,7 +285,11 @@ mt7921_pm_set(void *data, u64 val) pm->stats.last_wake_event = jiffies; pm->stats.last_doze_event = jiffies; } - pm->enable = val; + /* make sure the chip is awake here and ps_work is scheduled + * just at the end of this routine.
+ */ + pm->enable = false; + mt76_connac_pm_wake(&dev->mphy, pm); ieee80211_iterate_active_interfaces(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, @@ -293,8 +297,10 @@ mt7921_pm_set(void *data, u64 val) mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable); + pm->enable = val; + mt76_connac_power_save_sched(&dev->mphy, pm); out: - mt7921_mutex_release(dev); + mutex_unlock(&dev->mt76.mutex); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index 210998f086ab..ad59ef9839dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -76,14 +76,6 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) wiphy->max_sched_scan_reqs = 1; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; wiphy->reg_notifier = mt7921_regd_notifier; - wiphy->sar_capa = &mt76_sar_capa; - - phy->mt76->frp = devm_kcalloc(dev->mt76.dev, - wiphy->sar_capa->num_freq_ranges, - sizeof(struct mt76_freq_range_power), - GFP_KERNEL); - if (!phy->mt76->frp) - return -ENOMEM; wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; @@ -264,6 +256,10 @@ int mt7921_register_device(struct mt7921_dev *dev) IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + if (is_mt7922(&dev->mt76)) + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | + IEEE80211_VHT_CAP_SHORT_GI_160; dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index db3302b1576a..ec10f95a4649 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -218,10 +218,9 @@ mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, } static void -mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, - struct mt76_rx_status *status, - __le32 *rxv) +mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv) { + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; static const struct ieee80211_radiotap_he_mu mu_known = { .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | @@ -233,6 +232,8 @@ mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, }; struct ieee80211_radiotap_he_mu *he_mu; + status->flag |= RX_FLAG_RADIOTAP_HE_MU; + he_mu = skb_push(skb, sizeof(mu_known)); memcpy(he_mu, &mu_known, sizeof(mu_known)); @@ -263,10 +264,9 @@ mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb, } static void -mt7921_mac_decode_he_radiotap(struct sk_buff *skb, - struct mt76_rx_status *status, - __le32 *rxv, u32 phy) +mt7921_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode) { + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; static const struct ieee80211_radiotap_he known = { .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | HE_BITS(DATA1_DATA_DCM_KNOWN) | @@ -284,6 +284,8 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, struct ieee80211_radiotap_he *he = NULL; u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1; + status->flag |= RX_FLAG_RADIOTAP_HE; + he = skb_push(skb, sizeof(known)); memcpy(he, &known, sizeof(known)); @@ -298,7 +300,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) | HE_PREP(DATA6_DOPPLER, 
DOPPLER, rxv[14]); - switch (phy) { + switch (mode) { case MT_PHY_TYPE_HE_SU: he->data1 |= HE_BITS(DATA1_FORMAT_SU) | HE_BITS(DATA1_UL_DL_KNOWN) | @@ -322,6 +324,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb, he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]); mt7921_mac_decode_he_radiotap_ru(status, he, rxv); + mt7921_mac_decode_he_mu_radiotap(skb, rxv); break; case MT_PHY_TYPE_HE_TB: he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | @@ -395,6 +398,81 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb) mt7921_mac_rssi_iter, skb); } +/* The HW does not translate the mac header to 802.3 for mesh point */ +static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid; + struct ieee80211_sta *sta; + struct ieee80211_vif *vif; + struct ieee80211_hdr hdr; + struct ethhdr eth_hdr; + __le32 *rxd = (__le32 *)skb->data; + __le32 qos_ctrl, ht_ctrl; + + if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) != + MT_RXD3_NORMAL_U2M) + return -EINVAL; + + if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4)) + return -EINVAL; + + if (!msta || !msta->vif) + return -EINVAL; + + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + + /* store the info from RXD and ethhdr to avoid being overridden */ + memcpy(ð_hdr, skb->data + hdr_gap, sizeof(eth_hdr)); + hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]); + hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]); + qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]); + ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]); + + hdr.duration_id = 0; + ether_addr_copy(hdr.addr1, vif->addr); + ether_addr_copy(hdr.addr2, sta->addr); + switch (le16_to_cpu(hdr.frame_control) & + (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { + case 0: + ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); + break; + case IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr.h_source); + break; + case IEEE80211_FCTL_TODS: + ether_addr_copy(hdr.addr3, eth_hdr.h_dest); + break; + case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: + ether_addr_copy(hdr.addr3, eth_hdr.h_dest); + ether_addr_copy(hdr.addr4, eth_hdr.h_source); + break; + default: + break; + } + + skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); + if (eth_hdr.h_proto == htons(ETH_P_AARP) || + eth_hdr.h_proto == htons(ETH_P_IPX)) + ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); + else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN)) + ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); + else + skb_pull(skb, 2); + + if (ieee80211_has_order(hdr.frame_control)) + memcpy(skb_push(skb, 2), &ht_ctrl, 2); + if (ieee80211_is_data_qos(hdr.frame_control)) + memcpy(skb_push(skb, 2), &qos_ctrl, 2); + if (ieee80211_has_a4(hdr.frame_control)) + memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); + else + memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); + + return 0; +} + static int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) { @@ -402,11 +480,11 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; bool hdr_trans, unicast, insert_ccmp_hdr = false; u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info; + u16 hdr_gap; __le32 *rxv = NULL, *rxd = (__le32 *)skb->data; struct mt76_phy *mphy = &dev->mt76.phy; struct mt7921_phy *phy = &dev->phy; struct 
ieee80211_supported_band *sband; - struct ieee80211_hdr *hdr; u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); @@ -428,10 +506,17 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) return -EINVAL; + hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; + if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) + return -EINVAL; + + /* ICV error or CCMP/BIP/WPI MIC error */ + if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) + status->flag |= RX_FLAG_ONLY_MONITOR; + chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3); unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); - hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; status->wcid = mt7921_rx_get_wcid(dev, idx, unicast); if (status->wcid) { @@ -612,15 +697,12 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) return -EINVAL; break; case MT_PHY_TYPE_HE_MU: - status->flag |= RX_FLAG_RADIOTAP_HE_MU; - fallthrough; case MT_PHY_TYPE_HE_SU: case MT_PHY_TYPE_HE_EXT_SU: case MT_PHY_TYPE_HE_TB: status->nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1; status->encoding = RX_ENC_HE; - status->flag |= RX_FLAG_RADIOTAP_HE; i &= GENMASK(3, 0); if (gi <= NL80211_RATE_INFO_HE_GI_3_2) @@ -668,14 +750,21 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) } } - skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); - amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); status->amsdu = !!amsdu_info; if (status->amsdu) { status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; - if (!hdr_trans) { + } + + hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; + if (hdr_trans && ieee80211_has_morefrags(fc)) { + if (mt7921_reverse_frag0_hdr_trans(skb, hdr_gap)) + return -EINVAL; + hdr_trans = false; + } else { + skb_pull(skb, hdr_gap); + if (!hdr_trans && status->amsdu) { memmove(skb->data + 2, skb->data, ieee80211_get_hdrlen_from_skb(skb)); skb_pull(skb, 2); @@ -683,6 +772,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) } if (!hdr_trans) { + struct ieee80211_hdr *hdr; + if (insert_ccmp_hdr) { u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); @@ -696,19 +787,13 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) qos_ctl = *ieee80211_get_qos_ctl(hdr); } } else { - status->flag &= ~(RX_FLAG_RADIOTAP_HE | - RX_FLAG_RADIOTAP_HE_MU); status->flag |= RX_FLAG_8023; } mt7921_mac_assoc_rssi(dev, skb); - if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) { - mt7921_mac_decode_he_radiotap(skb, status, rxv, mode); - - if (status->flag & RX_FLAG_RADIOTAP_HE_MU) - mt7921_mac_decode_he_mu_radiotap(skb, status, rxv); - } + if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) + mt7921_mac_decode_he_radiotap(skb, rxv, mode); if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; @@ -903,7 +988,7 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, mt7921_mac_write_txwi_80211(dev, txwi, skb, key); if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) { - int rateidx = ffs(vif->bss_conf.basic_rates) - 1; + int rateidx = vif ? 
ffs(vif->bss_conf.basic_rates) - 1 : 0; u16 rate, mode; /* hardware won't add HTC for mgmt/ctrl frame */ @@ -1065,7 +1150,7 @@ out: return !!skb; } -static void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data) +void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data) { struct mt7921_sta *msta = NULL; struct mt76_wcid *wcid; @@ -1314,6 +1399,7 @@ void mt7921_mac_reset_work(struct work_struct *work) } dev->hw_full_reset = false; + pm->suspended = false; ieee80211_wake_queues(hw); ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 633c6d2a57ac..7a8d2596c226 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -128,11 +128,23 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; + + if (is_mt7922(phy->mt76->dev)) { + he_cap_elem->phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + he_cap_elem->phy_cap_info[8] |= + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; + } break; } he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); + if (is_mt7922(phy->mt76->dev)) { + he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); + } memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); if (he_cap_elem->phy_cap_info[6] & @@ -140,7 +152,8 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, mt7921_gen_ppe_thresh(he_cap->ppe_thres, nss); } else { he_cap_elem->phy_cap_info[9] |= - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US; + u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US, + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); } if (band == NL80211_BAND_6GHZ) { @@ -166,7 +179,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, if (vht_cap->cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) cap |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; - data->he_6ghz_capa.capa = cpu_to_le16(cap); + data[idx].he_6ghz_capa.capa = cpu_to_le16(cap); } idx++; } @@ -221,7 +234,7 @@ int __mt7921_start(struct mt7921_phy *phy) if (err) return err; - err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); + err = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH)); if (err) return err; @@ -318,12 +331,6 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, mtxq->wcid = &mvif->sta.wcid; } - if (vif->type != NL80211_IFTYPE_AP && - (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3)) - vif->offload_flags = 0; - - vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR; - out: mt7921_mutex_release(dev); @@ -369,7 +376,7 @@ static int mt7921_set_channel(struct mt7921_phy *phy) mt76_set_channel(phy->mt76); - ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH); + ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH)); if (ret) goto out; @@ -462,7 +469,7 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) { struct mt7921_dev *dev = mt7921_hw_dev(hw); struct mt7921_phy *phy = mt7921_hw_phy(hw); - int ret; + int ret = 0; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ieee80211_stop_queues(hw); @@ -474,8 +481,11 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) mt7921_mutex_acquire(dev); - if 
(changed & IEEE80211_CONF_CHANGE_POWER) - mt76_connac_mcu_set_rate_txpower(phy->mt76); + if (changed & IEEE80211_CONF_CHANGE_POWER) { + ret = mt76_connac_mcu_set_rate_txpower(phy->mt76); + if (ret) + goto out; + } if (changed & IEEE80211_CONF_CHANGE_MONITOR) { bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); @@ -490,9 +500,10 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed) mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter); } +out: mt7921_mutex_release(dev); - return 0; + return ret; } static int @@ -1238,7 +1249,6 @@ static int mt7921_suspend(struct ieee80211_hw *hw, { struct mt7921_dev *dev = mt7921_hw_dev(hw); struct mt7921_phy *phy = mt7921_hw_phy(hw); - int err; cancel_delayed_work_sync(&phy->scan_work); cancel_delayed_work_sync(&phy->mt76->mac_work); @@ -1249,34 +1259,24 @@ static int mt7921_suspend(struct ieee80211_hw *hw, mt7921_mutex_acquire(dev); clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); - - set_bit(MT76_STATE_SUSPEND, &phy->mt76->state); ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, mt76_connac_mcu_set_suspend_iter, &dev->mphy); - err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); - mt7921_mutex_release(dev); - return err; + return 0; } static int mt7921_resume(struct ieee80211_hw *hw) { struct mt7921_dev *dev = mt7921_hw_dev(hw); struct mt7921_phy *phy = mt7921_hw_phy(hw); - int err; mt7921_mutex_acquire(dev); - err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); - if (err < 0) - goto out; - set_bit(MT76_STATE_RUNNING, &phy->mt76->state); - clear_bit(MT76_STATE_SUSPEND, &phy->mt76->state); ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, mt76_connac_mcu_set_suspend_iter, @@ -1284,11 +1284,10 @@ static int mt7921_resume(struct ieee80211_hw *hw) ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, MT7921_WATCHDOG_TIME); -out: mt7921_mutex_release(dev); - return err; + return 0; } static void mt7921_set_wakeup(struct ieee80211_hw *hw, bool enabled) @@ -1334,41 +1333,23 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid, - MCU_UNI_CMD_STA_REC_UPDATE); + MCU_UNI_CMD(STA_REC_UPDATE)); } static int mt7921_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) { - const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa; struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt76_freq_range_power *data, *frp; struct mt76_phy *mphy = hw->priv; int err; - u32 i; - - if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs) - return -EINVAL; mt7921_mutex_acquire(dev); - - data = mphy->frp; - - for (i = 0; i < sar->num_sub_specs; i++) { - u32 index = sar->sub_specs[i].freq_range_index; - /* SAR specifies power limitaton in 0.25dbm */ - s32 power = sar->sub_specs[i].power >> 1; - - if (power > 127 || power < -127) - power = 127; - - frp = &data[index]; - frp->range = &capa->freq_ranges[index]; - frp->power = power; - } + err = mt76_init_sar_power(hw, sar); + if (err) + goto out; err = mt76_connac_mcu_set_rate_txpower(mphy); - +out: mt7921_mutex_release(dev); return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index 6ada1ebe7d68..ef1e1ef91611 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -93,9 +93,6 @@ struct mt7921_fw_region { #define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0) #define 
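/*
 * A minimal userspace sketch of the single-exit locking pattern the
 * mt7921_config() rework above moves to: take the lock once, run each
 * configuration step, jump to a common unlock label on the first error,
 * and return the saved error code. The cfg_step_*() names are
 * illustrative placeholders, not driver functions.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

static int cfg_step_power(void)   { return 0; }  /* pretend success */
static int cfg_step_monitor(void) { return -1; } /* pretend failure */

static int dev_config(void)
{
	int ret = 0;

	pthread_mutex_lock(&dev_mutex);

	ret = cfg_step_power();
	if (ret)
		goto out;	/* first failure wins; lock is still dropped */

	ret = cfg_step_monitor();
out:
	pthread_mutex_unlock(&dev_mutex);
	return ret;
}

int main(void)
{
	printf("dev_config() = %d\n", dev_config());
	return 0;
}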
PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0) -#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id) -#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id) - static enum mcu_cipher_type mt7921_mcu_get_cipher(int cipher) { @@ -163,8 +160,8 @@ mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb) int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, struct sk_buff *skb, int seq) { + int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); struct mt7921_mcu_rxd *rxd; - int mcu_cmd = cmd & MCU_CMD_MASK; int ret = 0; if (!skb) { @@ -179,24 +176,20 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, if (seq != rxd->seq) return -EAGAIN; - switch (cmd) { - case MCU_CMD_PATCH_SEM_CONTROL: + if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) { skb_pull(skb, sizeof(*rxd) - 4); ret = *skb->data; - break; - case MCU_EXT_CMD_GET_TEMP: + } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) { skb_pull(skb, sizeof(*rxd) + 4); ret = le32_to_cpu(*(__le32 *)skb->data); - break; - case MCU_EXT_CMD_EFUSE_ACCESS: + } else if (cmd == MCU_EXT_CMD(EFUSE_ACCESS)) { ret = mt7921_mcu_parse_eeprom(mdev, skb); - break; - case MCU_UNI_CMD_DEV_INFO_UPDATE: - case MCU_UNI_CMD_BSS_INFO_UPDATE: - case MCU_UNI_CMD_STA_REC_UPDATE: - case MCU_UNI_CMD_HIF_CTRL: - case MCU_UNI_CMD_OFFLOAD: - case MCU_UNI_CMD_SUSPEND: { + } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) || + cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) || + cmd == MCU_UNI_CMD(STA_REC_UPDATE) || + cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(OFFLOAD) || + cmd == MCU_UNI_CMD(SUSPEND)) { struct mt7921_mcu_uni_event *event; skb_pull(skb, sizeof(*rxd)); @@ -205,19 +198,14 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, /* skip invalid event */ if (mcu_cmd != event->cid) ret = -EAGAIN; - break; - } - case MCU_CMD_REG_READ: { + } else if (cmd == MCU_CE_QUERY(REG_READ)) { struct mt7921_mcu_reg_event *event; skb_pull(skb, sizeof(*rxd)); event = (struct mt7921_mcu_reg_event *)skb->data; ret = (int)le32_to_cpu(event->val); - break; - } - default: + } else { skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); - break; } return ret; @@ -228,32 +216,28 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *wait_seq) { struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); - int txd_len, mcu_cmd = cmd & MCU_CMD_MASK; + int txd_len, mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd); struct mt7921_uni_txd *uni_txd; struct mt7921_mcu_txd *mcu_txd; __le32 *txd; u32 val; u8 seq; - switch (cmd) { - case MCU_UNI_CMD_HIF_CTRL: - case MCU_UNI_CMD_SUSPEND: - case MCU_UNI_CMD_OFFLOAD: - mdev->mcu.timeout = HZ / 3; - break; - default: + if (cmd == MCU_UNI_CMD(HIF_CTRL) || + cmd == MCU_UNI_CMD(SUSPEND) || + cmd == MCU_UNI_CMD(OFFLOAD)) + mdev->mcu.timeout = HZ; + else mdev->mcu.timeout = 3 * HZ; - break; - } seq = ++dev->mt76.mcu.msg_seq & 0xf; if (!seq) seq = ++dev->mt76.mcu.msg_seq & 0xf; - if (cmd == MCU_CMD_FW_SCATTER) + if (cmd == MCU_CMD(FW_SCATTER)) goto exit; - txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd); + txd_len = cmd & __MCU_CMD_FIELD_UNI ? 
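/*
 * A self-contained model of the MCU_CMD()/FIELD_GET() command encoding the
 * hunks above convert to: command ID, extended ID and flag bits are packed
 * into one u32 and recovered with masks instead of ad-hoc prefix compares.
 * The field positions below are assumptions for illustration, not the real
 * mt76 layout.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)     ((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP32(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET32(m, r)   (((uint32_t)(r) & (m)) >> __builtin_ctz(m))

#define CMD_FIELD_ID	 GENMASK32(7, 0)
#define CMD_FIELD_EXT_ID GENMASK32(15, 8)
#define CMD_FIELD_QUERY  (1u << 16)
#define CMD_FIELD_UNI	 (1u << 17)

#define MCU_UNI_CMD(id)  (CMD_FIELD_UNI | FIELD_PREP32(CMD_FIELD_ID, (id)))

int main(void)
{
	uint32_t cmd = MCU_UNI_CMD(0x22);

	printf("id=0x%02x uni=%u query=%u\n",
	       FIELD_GET32(CMD_FIELD_ID, cmd),
	       !!(cmd & CMD_FIELD_UNI),
	       !!(cmd & CMD_FIELD_QUERY));
	return 0;
}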
sizeof(*uni_txd) : sizeof(*mcu_txd); txd = (__le32 *)skb_push(skb, txd_len); val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) | @@ -265,7 +249,7 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD); txd[1] = cpu_to_le32(val); - if (cmd & MCU_UNI_PREFIX) { + if (cmd & __MCU_CMD_FIELD_UNI) { uni_txd = (struct mt7921_uni_txd *)txd; uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd)); uni_txd->option = MCU_CMD_UNI_EXT_ACK; @@ -283,34 +267,20 @@ int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, MT_TX_MCU_PORT_RX_Q0)); mcu_txd->pkt_type = MCU_PKT_ID; mcu_txd->seq = seq; + mcu_txd->cid = mcu_cmd; + mcu_txd->s2d_index = MCU_S2D_H2N; + mcu_txd->ext_cid = FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd); - switch (cmd & ~MCU_CMD_MASK) { - case MCU_FW_PREFIX: - mcu_txd->set_query = MCU_Q_NA; - mcu_txd->cid = mcu_cmd; - break; - case MCU_CE_PREFIX: - if (cmd & MCU_QUERY_MASK) - mcu_txd->set_query = MCU_Q_QUERY; - else - mcu_txd->set_query = MCU_Q_SET; - mcu_txd->cid = mcu_cmd; - break; - default: - mcu_txd->cid = MCU_CMD_EXT_CID; - if (cmd & MCU_QUERY_PREFIX || cmd == MCU_EXT_CMD_EFUSE_ACCESS) + if (mcu_txd->ext_cid || (cmd & __MCU_CMD_FIELD_CE)) { + if (cmd & __MCU_CMD_FIELD_QUERY) mcu_txd->set_query = MCU_Q_QUERY; else mcu_txd->set_query = MCU_Q_SET; - mcu_txd->ext_cid = mcu_cmd; - mcu_txd->ext_cid_ack = 1; - break; + mcu_txd->ext_cid_ack = !!mcu_txd->ext_cid; + } else { + mcu_txd->set_query = MCU_Q_NA; } - mcu_txd->s2d_index = MCU_S2D_H2N; - WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS && - mcu_txd->set_query != MCU_Q_QUERY); - exit: if (wait_seq) *wait_seq = seq; @@ -419,6 +389,17 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb) } static void +mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_mcu_tx_done_event *event; + + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + event = (struct mt7921_mcu_tx_done_event *)skb->data; + + mt7921_mac_add_txs(dev, event->txs); +} + +static void mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) { struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; @@ -445,6 +426,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) case MCU_EVENT_LP_INFO: mt7921_mcu_low_power_event(dev, skb); break; + case MCU_EVENT_TX_DONE: + mt7921_mcu_tx_done_event(dev, skb); + break; default: break; } @@ -567,7 +551,7 @@ int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif, return ret; return mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_UNI_CMD_STA_REC_UPDATE, true); + MCU_UNI_CMD(STA_REC_UPDATE), true); } int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, @@ -602,7 +586,7 @@ int mt7921_mcu_restart(struct mt76_dev *dev) .power_mode = 1, }; - return mt76_mcu_send_msg(dev, MCU_CMD_NIC_POWER_CTRL, &req, + return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req, sizeof(req), false); } EXPORT_SYMBOL_GPL(mt7921_mcu_restart); @@ -708,7 +692,7 @@ static int mt7921_load_patch(struct mt7921_dev *dev) goto out; } - ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, + ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), dl, len, max_len); if (ret) { dev_err(dev->mt76.dev, "Failed to send patch\n"); @@ -720,6 +704,17 @@ static int mt7921_load_patch(struct mt7921_dev *dev) if (ret) dev_err(dev->mt76.dev, "Failed to start patch\n"); + if (mt76_is_sdio(&dev->mt76)) { + /* activate again */ + ret = __mt7921_mcu_fw_pmctrl(dev); + if (ret) + return ret; + 
+ ret = __mt7921_mcu_drv_pmctrl(dev); + if (ret) + return ret; + } + out: sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); switch (sem) { @@ -782,7 +777,7 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev, return err; } - err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER, + err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER), data + offset, len, max_len); if (err) { dev_err(dev->mt76.dev, "Failed to send firmware.\n"); @@ -882,7 +877,7 @@ fw_loaded: dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; #endif /* CONFIG_PM */ - dev_err(dev->mt76.dev, "Firmware init done\n"); + dev_dbg(dev->mt76.dev, "Firmware init done\n"); return 0; } @@ -896,8 +891,8 @@ int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl) .ctrl_val = ctrl }; - return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FWLOG_2_HOST, &data, - sizeof(data), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST), + &data, sizeof(data), false); } int mt7921_run_firmware(struct mt7921_dev *dev) @@ -997,8 +992,8 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) e->cw_max = cpu_to_le16(10); } - ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req, - sizeof(req), true); + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE), + &req, sizeof(req), true); if (ret) return ret; @@ -1022,8 +1017,8 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) e->timer = q->mu_edca_timer; } - return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_MU_EDCA_PARMS, &req_mu, - sizeof(req_mu), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS), + &req_mu, sizeof(req_mu), false); } int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd) @@ -1070,7 +1065,7 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd) else req.switch_reason = CH_SWITCH_NORMAL; - if (cmd == MCU_EXT_CMD_CHANNEL_SWITCH) + if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH)) req.rx_streams = hweight8(req.rx_streams); if (chandef->width == NL80211_CHAN_WIDTH_80P80) { @@ -1093,7 +1088,7 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev) .format = EE_FORMAT_WHOLE, }; - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE, + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE), &req, sizeof(req), true); } EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom); @@ -1108,8 +1103,9 @@ int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset) int ret; u8 *buf; - ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req, - sizeof(req), true, &skb); + ret = mt76_mcu_send_and_get_msg(&dev->mt76, + MCU_EXT_QUERY(EFUSE_ACCESS), + &req, sizeof(req), true, &skb); if (ret) return ret; @@ -1154,7 +1150,7 @@ int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif) if (vif->type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE, + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), &ps_req, sizeof(ps_req), true); } @@ -1190,7 +1186,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, if (vif->type != NL80211_IFTYPE_STATION) return 0; - return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE, + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), &bcnft_req, sizeof(bcnft_req), true); } @@ -1226,13 +1222,13 @@ mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, if (vif->type != NL80211_IFTYPE_STATION) return 0; - err = mt76_mcu_send_msg(&dev->mt76, 
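/*
 * A sketch of the chunked download that __mt76_mcu_send_firmware() (used
 * with MCU_CMD(FW_SCATTER) above) performs: a firmware region is cut into
 * transfers no larger than the transport's max payload and sent in order.
 * send_chunk() is a stand-in for the real bus write, not a driver API.
 */
#include <stddef.h>
#include <stdio.h>

static int send_chunk(const unsigned char *data, size_t len)
{
	(void)data; /* a real bus write would DMA this buffer */
	printf("sending %zu bytes\n", len);
	return 0;   /* pretend the transfer succeeded */
}

static int send_firmware(const unsigned char *data, size_t len, size_t max_len)
{
	while (len) {
		size_t cur = len < max_len ? len : max_len;
		int err = send_chunk(data, cur);

		if (err)
			return err; /* abort on the first failed transfer */
		data += cur;
		len -= cur;
	}
	return 0;
}

int main(void)
{
	unsigned char fw[10000] = { 0 };

	return send_firmware(fw, sizeof(fw), 4096);
}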
MCU_CMD_SET_BSS_ABORT, &req_hdr, - sizeof(req_hdr), false); + err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT), + &req_hdr, sizeof(req_hdr), false); if (err < 0 || !enable) return err; - return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req, - sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED), + &req, sizeof(req), false); } int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta, @@ -1245,7 +1241,7 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta, .sta = sta, .vif = vif, .enable = enable, - .cmd = MCU_UNI_CMD_STA_REC_UPDATE, + .cmd = MCU_UNI_CMD(STA_REC_UPDATE), .state = state, .offload_fw = true, .rcpi = to_rcpi(rssi), @@ -1342,7 +1338,7 @@ int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr) struct sk_buff *skb; int ret; - ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_TXPWR, + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR), &req, sizeof(req), true, &skb); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h index edc0c73f8c01..77cc0cc5b436 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h @@ -76,20 +76,32 @@ struct mt7921_uni_txd { u8 reserved2[4]; } __packed __aligned(4); -/* event table */ -enum { - MCU_EVENT_REG_ACCESS = 0x05, - MCU_EVENT_LP_INFO = 0x07, - MCU_EVENT_SCAN_DONE = 0x0d, - MCU_EVENT_TX_DONE = 0x0f, - MCU_EVENT_BSS_ABSENCE = 0x11, - MCU_EVENT_BSS_BEACON_LOSS = 0x13, - MCU_EVENT_CH_PRIVILEGE = 0x18, - MCU_EVENT_SCHED_SCAN_DONE = 0x23, - MCU_EVENT_DBG_MSG = 0x27, - MCU_EVENT_TXPWR = 0xd0, - MCU_EVENT_COREDUMP = 0xf0, -}; +struct mt7921_mcu_tx_done_event { + u8 pid; + u8 status; + __le16 seq; + + u8 wlan_idx; + u8 tx_cnt; + __le16 tx_rate; + + u8 flag; + u8 tid; + u8 rsp_rate; + u8 mcs; + + u8 bw; + u8 tx_pwr; + u8 reason; + u8 rsv0[1]; + + __le32 delay; + __le32 timestamp; + __le32 applied_flag; + u8 txs[28]; + + u8 rsv1[32]; +} __packed; /* ext event table */ enum { @@ -126,20 +138,6 @@ struct mt7921_mcu_eeprom_info { #define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10)) #define MCU_PKT_ID 0xa0 -enum { - MCU_Q_QUERY, - MCU_Q_SET, - MCU_Q_RESERVED, - MCU_Q_NA -}; - -enum { - MCU_S2D_H2N, - MCU_S2D_C2N, - MCU_S2D_H2C, - MCU_S2D_H2CN -}; - struct mt7921_mcu_uni_event { u8 cid; u8 pad[3]; @@ -147,109 +145,10 @@ struct mt7921_mcu_uni_event { } __packed; enum { - PATCH_NOT_DL_SEM_FAIL, - PATCH_IS_DL, - PATCH_NOT_DL_SEM_SUCCESS, - PATCH_REL_SEM_SUCCESS -}; - -enum { - FW_STATE_INITIAL, - FW_STATE_FW_DOWNLOAD, - FW_STATE_NORMAL_OPERATION, - FW_STATE_NORMAL_TRX, - FW_STATE_WACPU_RDY = 7 -}; - -enum { - EE_MODE_EFUSE, - EE_MODE_BUFFER, -}; - -enum { - EE_FORMAT_BIN, - EE_FORMAT_WHOLE, - EE_FORMAT_MULTIPLE, -}; - -enum { - MCU_PHY_STATE_TX_RATE, - MCU_PHY_STATE_RX_RATE, - MCU_PHY_STATE_RSSI, - MCU_PHY_STATE_CONTENTION_RX_RATE, - MCU_PHY_STATE_OFDMLQ_CNINFO, -}; - -struct sec_key { - u8 cipher_id; - u8 cipher_len; - u8 key_id; - u8 key_len; - u8 key[32]; -} __packed; - -struct sta_rec_sec { - __le16 tag; - __le16 len; - u8 add; - u8 n_cipher; - u8 rsv[2]; - - struct sec_key key[2]; -} __packed; - -enum mcu_cipher_type { - MCU_CIPHER_NONE = 0, - MCU_CIPHER_WEP40, - MCU_CIPHER_WEP104, - MCU_CIPHER_WEP128, - MCU_CIPHER_TKIP, - MCU_CIPHER_AES_CCMP, - MCU_CIPHER_CCMP_256, - MCU_CIPHER_GCMP, - MCU_CIPHER_GCMP_256, - MCU_CIPHER_WAPI, - MCU_CIPHER_BIP_CMAC_128, -}; - -enum { - 
CH_SWITCH_NORMAL = 0, - CH_SWITCH_SCAN = 3, - CH_SWITCH_MCC = 4, - CH_SWITCH_DFS = 5, - CH_SWITCH_BACKGROUND_SCAN_START = 6, - CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7, - CH_SWITCH_BACKGROUND_SCAN_STOP = 8, - CH_SWITCH_SCAN_BYPASS_DPD = 9 -}; - -enum { - THERMAL_SENSOR_TEMP_QUERY, - THERMAL_SENSOR_MANUAL_CTRL, - THERMAL_SENSOR_INFO_QUERY, - THERMAL_SENSOR_TASK_CTRL, -}; - -enum { MT_EBF = BIT(0), /* explicit beamforming */ MT_IBF = BIT(1) /* implicit beamforming */ }; -#define STA_CAP_WMM BIT(0) -#define STA_CAP_SGI_20 BIT(4) -#define STA_CAP_SGI_40 BIT(5) -#define STA_CAP_TX_STBC BIT(6) -#define STA_CAP_RX_STBC BIT(7) -#define STA_CAP_VHT_SGI_80 BIT(16) -#define STA_CAP_VHT_SGI_160 BIT(17) -#define STA_CAP_VHT_TX_STBC BIT(18) -#define STA_CAP_VHT_RX_STBC BIT(19) -#define STA_CAP_VHT_LDPC BIT(23) -#define STA_CAP_LDPC BIT(24) -#define STA_CAP_HT BIT(26) -#define STA_CAP_VHT BIT(27) -#define STA_CAP_HE BIT(28) - struct mt7921_mcu_reg_event { __le32 reg; __le32 val; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index e9c7c3a19507..96647801850a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -446,6 +446,7 @@ int mt7921_mcu_restart(struct mt76_dev *dev); void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb); +int mt7921e_driver_own(struct mt7921_dev *dev); int mt7921e_mac_reset(struct mt7921_dev *dev); int mt7921e_mcu_init(struct mt7921_dev *dev); int mt7921s_wfsys_reset(struct mt7921_dev *dev); @@ -463,4 +464,5 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct mt76_tx_info *tx_info); void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update); +void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 305b63fa1a8a..9dae2f5972bf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -15,6 +15,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) }, { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608) }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616) }, { }, }; @@ -188,7 +190,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev, tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev); mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) | (mt7921_l1_rr(dev, MT_HW_REV) & 0xff); - dev_err(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); @@ -235,7 +237,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state) struct mt76_dev *mdev = pci_get_drvdata(pdev); struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); struct mt76_connac_pm *pm = &dev->pm; - bool hif_suspend; int i, err; pm->suspended = true; @@ -246,12 +247,9 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state) if (err < 0) goto restore_suspend; - hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state); - if (hif_suspend) { - err = mt76_connac_mcu_set_hif_suspend(mdev, true); - if (err) - goto restore_suspend; - } + err = mt76_connac_mcu_set_hif_suspend(mdev, true); + if (err) + goto 
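/*
 * A compact model of the restore_*-label unwinding the PCI suspend path in
 * this hunk relies on: each successful step gains an undo action, and a
 * failure jumps to the label that undoes exactly the steps already done.
 * The step functions are placeholders, not driver calls.
 */
#include <stdio.h>

static int step_pm(void)   { return 0; }
static int step_hif(void)  { return -1; } /* force the unwind path */
static void undo_pm(void)  { puts("undo pm"); }

static int do_suspend(void)
{
	int err;

	err = step_pm();
	if (err)
		goto restore_none;

	err = step_hif();
	if (err)
		goto restore_pm;

	return 0;

restore_pm:
	undo_pm();
restore_none:
	return err;
}

int main(void)
{
	printf("do_suspend() = %d\n", do_suspend());
	return 0;
}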
restore_suspend; /* always enable deep sleep during suspend to reduce * power consumption @@ -302,8 +300,7 @@ restore_napi: if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); - if (hif_suspend) - mt76_connac_mcu_set_hif_suspend(mdev, false); + mt76_connac_mcu_set_hif_suspend(mdev, false); restore_suspend: pm->suspended = false; @@ -318,7 +315,6 @@ static int mt7921_pci_resume(struct pci_dev *pdev) struct mt76_connac_pm *pm = &dev->pm; int i, err; - pm->suspended = false; err = pci_set_power_state(pdev, PCI_D0); if (err) return err; @@ -356,8 +352,11 @@ static int mt7921_pci_resume(struct pci_dev *pdev) if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); - if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state)) - err = mt76_connac_mcu_set_hif_suspend(mdev, false); + err = mt76_connac_mcu_set_hif_suspend(mdev, false); + if (err) + return err; + + pm->suspended = false; return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c index f9547d27356e..85286cc9add1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -321,6 +321,10 @@ int mt7921e_mac_reset(struct mt7921_dev *dev) MT_INT_MCU_CMD); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); + err = mt7921e_driver_own(dev); + if (err) + return err; + err = mt7921_run_firmware(dev); if (err) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c index 583a89a34734..a020352122a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c @@ -4,7 +4,7 @@ #include "mt7921.h" #include "mcu.h" -static int mt7921e_driver_own(struct mt7921_dev *dev) +int mt7921e_driver_own(struct mt7921_dev *dev) { u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0); @@ -30,7 +30,7 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, if (ret) return ret; - if (cmd == MCU_CMD_FW_SCATTER) + if (cmd == MCU_CMD(FW_SCATTER)) txq = MT_MCUQ_FWDL; return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c index ddf0eeb8b688..65d693902c22 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c @@ -62,6 +62,10 @@ static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr) if (err < 0) return err; + if (irq_data->rx.num[0] > 16 || + irq_data->rx.num[1] > 128) + return -EINVAL; + intr->isr = irq_data->isr; intr->rec_mb = irq_data->rec_mb; intr->tx.wtqcr = irq_data->tx.wtqcr; @@ -203,10 +207,11 @@ static int mt7921s_suspend(struct device *__dev) struct mt7921_dev *dev = sdio_get_drvdata(func); struct mt76_connac_pm *pm = &dev->pm; struct mt76_dev *mdev = &dev->mt76; - bool hif_suspend; int err; pm->suspended = true; + set_bit(MT76_STATE_SUSPEND, &mdev->phy.state); + cancel_delayed_work_sync(&pm->ps_work); cancel_work_sync(&pm->wake_work); @@ -214,13 +219,6 @@ static int mt7921s_suspend(struct device *__dev) if (err < 0) goto restore_suspend; - hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state); - if (hif_suspend) { - err = mt76_connac_mcu_set_hif_suspend(mdev, true); - if (err) - goto restore_suspend; - } - /* always enable deep sleep during suspend to reduce * power consumption */ @@ -228,35 +226,45 @@ static int mt7921s_suspend(struct device 
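/*
 * The mt7921s_parse_intr() hunk above rejects interrupt data whose rx
 * counts exceed what the queues can hold. A minimal sketch of the same
 * validate-before-use rule for counts read from the device; the limit
 * values here mirror the hunk but the structure is simplified.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_RX0 16
#define MAX_RX1 128

struct intr_data { unsigned int rx_num[2]; };

static int parse_intr(const struct intr_data *d, unsigned int out[2])
{
	if (d->rx_num[0] > MAX_RX0 || d->rx_num[1] > MAX_RX1)
		return -EINVAL; /* never index queues with untrusted counts */

	memcpy(out, d->rx_num, sizeof(d->rx_num));
	return 0;
}

int main(void)
{
	struct intr_data bad = { { 17, 0 } };
	unsigned int out[2];

	printf("parse_intr() = %d\n", parse_intr(&bad, out));
	return 0;
}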
*__dev) mt76_txq_schedule_all(&dev->mphy); mt76_worker_disable(&mdev->tx_worker); - mt76_worker_disable(&mdev->sdio.txrx_worker); mt76_worker_disable(&mdev->sdio.status_worker); - mt76_worker_disable(&mdev->sdio.net_worker); cancel_work_sync(&mdev->sdio.stat_work); clear_bit(MT76_READING_STATS, &dev->mphy.state); - mt76_tx_status_check(mdev, true); - err = mt7921_mcu_fw_pmctrl(dev); + mt76_worker_schedule(&mdev->sdio.txrx_worker); + wait_event_timeout(dev->mt76.sdio.wait, + mt76s_txqs_empty(&dev->mt76), 5 * HZ); + + /* It is supposed that SDIO bus is idle at the point */ + err = mt76_connac_mcu_set_hif_suspend(mdev, true); if (err) goto restore_worker; + mt76_worker_disable(&mdev->sdio.txrx_worker); + mt76_worker_disable(&mdev->sdio.net_worker); + + err = mt7921_mcu_fw_pmctrl(dev); + if (err) + goto restore_txrx_worker; + sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); return 0; +restore_txrx_worker: + mt76_worker_enable(&mdev->sdio.net_worker); + mt76_worker_enable(&mdev->sdio.txrx_worker); + mt76_connac_mcu_set_hif_suspend(mdev, false); + restore_worker: mt76_worker_enable(&mdev->tx_worker); - mt76_worker_enable(&mdev->sdio.txrx_worker); mt76_worker_enable(&mdev->sdio.status_worker); - mt76_worker_enable(&mdev->sdio.net_worker); if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(mdev, false); - if (hif_suspend) - mt76_connac_mcu_set_hif_suspend(mdev, false); - restore_suspend: + clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state); pm->suspended = false; return err; @@ -270,7 +278,7 @@ static int mt7921s_resume(struct device *__dev) struct mt76_dev *mdev = &dev->mt76; int err; - pm->suspended = false; + clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state); err = mt7921_mcu_drv_pmctrl(dev); if (err < 0) @@ -285,8 +293,11 @@ static int mt7921s_resume(struct device *__dev) if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(mdev, false); - if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state)) - err = mt76_connac_mcu_set_hif_suspend(mdev, false); + err = mt76_connac_mcu_set_hif_suspend(mdev, false); + if (err) + return err; + + pm->suspended = false; return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c index 137f86a6dbf8..ccaf8134cec7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -142,15 +142,11 @@ out: static void mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid, enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int pid, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; - __le32 *txwi; - int pid; + __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); - txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE); memset(txwi, 0, MT_SDIO_TXD_SIZE); mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false); skb_push(skb, MT_SDIO_TXD_SIZE); @@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, { struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; struct sk_buff *skb = tx_info->skb; - int pad; + int err, pad, pktid; if (unlikely(tx_info->skb->len <= ETH_HLEN)) return -EINVAL; @@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, } } - mt7921s_write_txwi(dev, 
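/*
 * A userspace stand-in for the wait_event_timeout() drain the SDIO suspend
 * hunk above adds: suspend kicks the txrx worker, then waits on a
 * queues-empty predicate with a deadline instead of suspending while
 * frames are still queued. Plain polling replaces the kernel waitqueue
 * here purely for illustration.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int pending = 3;

static bool txqs_empty(void)  { return pending == 0; }
static void worker_step(void) { if (pending) pending--; }

static bool drain(double timeout_s)
{
	struct timespec t0, t;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	while (!txqs_empty()) {
		worker_step(); /* the real code wakes the txrx worker */
		clock_gettime(CLOCK_MONOTONIC, &t);
		if ((t.tv_sec - t0.tv_sec) +
		    (t.tv_nsec - t0.tv_nsec) / 1e9 > timeout_s)
			return false; /* give up, queues never emptied */
	}
	return true;
}

int main(void)
{
	printf("drained: %d\n", drain(5.0));
	return 0;
}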
wcid, qid, sta, skb); + pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb); mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA); pad = round_up(skb->len, 4) - skb->len; - return mt76_skb_adjust_pad(skb, pad); + err = mt76_skb_adjust_pad(skb, pad); + if (err) + /* Release pktid in case of error. */ + idr_remove(&wcid->pktid, pktid); + + return err; } void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) @@ -216,5 +219,5 @@ bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update) mt7921_mac_sta_poll(dev); mt7921_mutex_release(dev); - return 0; + return false; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c index 437cddad9a90..d20f2ff01be1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c @@ -33,7 +33,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, if (ret) return ret; - if (cmd == MCU_CMD_FW_SCATTER) + if (cmd == MCU_CMD(FW_SCATTER)) type = MT7921_SDIO_FWDL; mt7921_skb_add_sdio_hdr(skb, type); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c index 8bd43879dd6f..bdec8684ce94 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c @@ -66,7 +66,7 @@ mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req) if (!mt76_testmode_enabled(phy)) goto out; - ret = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TEST_CTRL, &cmd, + ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(TEST_CTRL), &cmd, sizeof(cmd), false); if (ret) goto out; @@ -95,7 +95,7 @@ mt7921_tm_query(struct mt7921_dev *dev, struct mt7921_tm_cmd *req, struct sk_buff *skb; int ret; - ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_TEST_CTRL, + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(TEST_CTRL), &cmd, sizeof(cmd), true, &skb); if (ret) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index c99acc21225e..54f72d215948 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -305,12 +305,12 @@ int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) spin_lock_init(&q->lock); q->entry = devm_kcalloc(dev->dev, - MT_NUM_RX_ENTRIES, sizeof(*q->entry), + MT76S_NUM_RX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return -ENOMEM; - q->ndesc = MT_NUM_RX_ENTRIES; + q->ndesc = MT76S_NUM_RX_ENTRIES; q->head = q->tail = 0; q->queued = 0; @@ -328,12 +328,12 @@ static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev) spin_lock_init(&q->lock); q->entry = devm_kcalloc(dev->dev, - MT_NUM_TX_ENTRIES, sizeof(*q->entry), + MT76S_NUM_TX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return ERR_PTR(-ENOMEM); - q->ndesc = MT_NUM_TX_ENTRIES; + q->ndesc = MT76S_NUM_TX_ENTRIES; return q; } @@ -479,7 +479,8 @@ static void mt76s_status_worker(struct mt76_worker *w) resched = true; if (dev->drv->tx_status_data && - !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) + !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) && + !test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) queue_work(dev->wq, &dev->sdio.stat_work); } while (nframes > 0); diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c index 649a56790b89..801590a0a334 100644 --- 
a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c @@ -317,7 +317,8 @@ void mt76s_txrx_worker(struct mt76_sdio *sdio) if (ret > 0) nframes += ret; - if (test_bit(MT76_MCU_RESET, &dev->phy.state)) { + if (test_bit(MT76_MCU_RESET, &dev->phy.state) || + test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) { if (!mt76s_txqs_empty(dev)) continue; else diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c index 66afc2b0a935..1a01ad7a4c16 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/testmode.c @@ -126,9 +126,9 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len) hdr = __skb_put_zero(head, head_len); hdr->frame_control = cpu_to_le16(fc); - memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr)); - memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr)); - memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr)); + memcpy(hdr->addr1, td->addr[0], ETH_ALEN); + memcpy(hdr->addr2, td->addr[1], ETH_ALEN); + memcpy(hdr->addr3, td->addr[2], ETH_ALEN); skb_set_queue_mapping(head, IEEE80211_AC_BE); info = IEEE80211_SKB_CB(head); @@ -318,6 +318,10 @@ mt76_testmode_init_defaults(struct mt76_phy *phy) td->tx_count = 1; td->tx_rate_mode = MT76_TM_TX_MODE_OFDM; td->tx_rate_nss = 1; + + memcpy(td->addr[0], phy->macaddr, ETH_ALEN); + memcpy(td->addr[1], phy->macaddr, ETH_ALEN); + memcpy(td->addr[2], phy->macaddr, ETH_ALEN); } static int @@ -493,6 +497,20 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } } + if (tb[MT76_TM_ATTR_MAC_ADDRS]) { + struct nlattr *cur; + int idx = 0; + int rem; + + nla_for_each_nested(cur, tb[MT76_TM_ATTR_MAC_ADDRS], rem) { + if (nla_len(cur) != ETH_ALEN || idx >= 3) + goto out; + + memcpy(td->addr[idx], nla_data(cur), ETH_ALEN); + idx++; + } + } + if (dev->test_ops->set_params) { err = dev->test_ops->set_params(phy, tb, state); if (err) @@ -635,6 +653,18 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, nla_nest_end(msg, a); } + if (mt76_testmode_param_present(td, MT76_TM_ATTR_MAC_ADDRS)) { + a = nla_nest_start(msg, MT76_TM_ATTR_MAC_ADDRS); + if (!a) + goto out; + + for (i = 0; i < 3; i++) + if (nla_put(msg, i, ETH_ALEN, td->addr[i])) + goto out; + + nla_nest_end(msg, a); + } + err = 0; out: diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h index d1f9c036dd1f..0590c35c7126 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.h +++ b/drivers/net/wireless/mediatek/mt76/testmode.h @@ -7,6 +7,8 @@ #define MT76_TM_TIMEOUT 10 +#include <net/netlink.h> + /** * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA * @@ -45,6 +47,8 @@ * @MT76_TM_ATTR_TX_TIME: packet transmission time, in unit of us (u32) * * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested) + * + * @MT76_TM_ATTR_MAC_ADDRS: array of nested MAC addresses (nested) */ enum mt76_testmode_attr { MT76_TM_ATTR_UNSPEC, @@ -81,6 +85,8 @@ enum mt76_testmode_attr { MT76_TM_ATTR_DRV_DATA, + MT76_TM_ATTR_MAC_ADDRS, + /* keep last */ NUM_MT76_TM_ATTRS, MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1, diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 11719ef034d8..6b8c9dc80542 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, if (!(cb->flags & 
MT_TX_CB_DMA_DONE)) continue; - if (!time_is_after_jiffies(cb->jiffies + + if (time_is_after_jiffies(cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT)) continue; } diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index dc4bfe7be378..8d8378bafd9b 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -1737,23 +1737,15 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, INIT_LIST_HEAD(&wl->rxq_head.list); INIT_LIST_HEAD(&wl->vif_list); - wl->hif_workqueue = create_singlethread_workqueue("WILC_wq"); - if (!wl->hif_workqueue) { - ret = -ENOMEM; - goto free_cfg; - } vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE, NL80211_IFTYPE_STATION, false); if (IS_ERR(vif)) { ret = PTR_ERR(vif); - goto free_hq; + goto free_cfg; } return 0; -free_hq: - destroy_workqueue(wl->hif_workqueue); - free_cfg: wilc_wlan_cfg_deinit(wl); diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index e69b9c7f3d31..71b44cfe0dfc 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -1312,7 +1312,7 @@ int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr) result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) - netdev_err(vif->ndev, "Failed to get mac address\n"); + netdev_err(vif->ndev, "Failed to set mac address\n"); return result; } @@ -1929,6 +1929,7 @@ int wilc_edit_station(struct wilc_vif *vif, const u8 *mac, int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout) { + struct wilc *wilc = vif->wilc; struct wid wid; int result; s8 power_mode; @@ -1944,6 +1945,8 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout) result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send power management\n"); + else + wilc->power_save_mode = enabled; return result; } diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 690572e01a2a..643bddaae32a 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -27,7 +27,7 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data) struct wilc *wilc = user_data; if (wilc->close) { - pr_err("Can't handle UH interrupt"); + pr_err("Can't handle UH interrupt\n"); return IRQ_HANDLED; } return IRQ_WAKE_THREAD; @@ -56,7 +56,7 @@ static int init_irq(struct net_device *dev) ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine, isr_bh_routine, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "WILC_IRQ", wl); + dev->name, wl); if (ret) { netdev_err(dev, "Failed to request IRQ [%d]\n", ret); return ret; @@ -468,7 +468,7 @@ static int wlan_initialize_threads(struct net_device *dev) struct wilc *wilc = vif->wilc; wilc->txq_thread = kthread_run(wilc_txq_task, (void *)wilc, - "K_TXQ_TASK"); + "%s-tx", dev->name); if (IS_ERR(wilc->txq_thread)) { netdev_err(dev, "couldn't create TXQ thread\n"); wilc->close = 0; @@ -574,6 +574,7 @@ static int wilc_mac_open(struct net_device *ndev) struct wilc *wl = vif->wilc; int ret = 0; struct mgmt_frame_regs mgmt_regs = {}; + u8 addr[ETH_ALEN] __aligned(2); if (!wl || !wl->dev) { netdev_err(ndev, "device not ready\n"); @@ -596,10 +597,9 @@ static int wilc_mac_open(struct net_device *ndev) vif->idx); if (is_valid_ether_addr(ndev->dev_addr)) { - wilc_set_mac_address(vif, 
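/*
 * The tx.c one-liner above flips an inverted timeout test. A sketch of the
 * wraparound-safe comparison behind time_is_after_jiffies(): two counters
 * are compared through a signed difference, so the check stays correct
 * when the counter overflows.
 */
#include <stdio.h>

typedef unsigned long jiffies_t;

/* true if a is after b, even across wraparound */
static int time_after_(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t now = (jiffies_t)-5; /* just before wraparound */
	jiffies_t deadline = now + 10; /* wraps past zero */

	/* deadline still in the future: the entry has not expired yet */
	printf("deadline after now: %d\n", time_after_(deadline, now));
	return 0;
}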
ndev->dev_addr); + ether_addr_copy(addr, ndev->dev_addr); + wilc_set_mac_address(vif, addr); } else { - u8 addr[ETH_ALEN]; - wilc_get_mac_address(vif, addr); eth_hw_addr_set(ndev, addr); } @@ -905,7 +905,6 @@ void wilc_netdev_cleanup(struct wilc *wilc) wilc_wlan_cfg_deinit(wilc); wlan_deinit_locks(wilc); - kfree(wilc->bus_data); wiphy_unregister(wilc->wiphy); wiphy_free(wilc->wiphy); } @@ -962,8 +961,15 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, ret = register_netdev(ndev); if (ret) { - free_netdev(ndev); - return ERR_PTR(-EFAULT); + ret = -EFAULT; + goto error; + } + + wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM, + ndev->name); + if (!wl->hif_workqueue) { + ret = -ENOMEM; + goto error; } ndev->needs_free_netdev = true; @@ -977,6 +983,10 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, synchronize_srcu(&wl->srcu); return vif; + + error: + free_netdev(ndev); + return ERR_PTR(ret); } MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index b9a88b3e322f..a067274c2014 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -212,6 +212,8 @@ struct wilc { s8 mac_status; struct clk *rtc_clk; bool initialized; + u32 chipid; + bool power_save_mode; int dev_irq_num; int close; u8 vif_num; diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 26ebf6664342..ec595dbd8959 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -167,9 +167,11 @@ free: static void wilc_sdio_remove(struct sdio_func *func) { struct wilc *wilc = sdio_get_drvdata(func); + struct wilc_sdio *sdio_priv = wilc->bus_data; clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); + kfree(sdio_priv); } static int wilc_sdio_reset(struct wilc *wilc) diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 640850f989dd..2c2ed4b09efd 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -8,10 +8,13 @@ #include <linux/spi/spi.h> #include <linux/crc7.h> #include <linux/crc-itu-t.h> +#include <linux/gpio/consumer.h> #include "netdev.h" #include "cfg80211.h" +#define SPI_MODALIAS "wilc1000_spi" + static bool enable_crc7; /* protect SPI commands with CRC7 */ module_param(enable_crc7, bool, 0644); MODULE_PARM_DESC(enable_crc7, @@ -43,6 +46,10 @@ struct wilc_spi { bool probing_crc; /* true if we're probing chip's CRC config */ bool crc7_enabled; /* true if crc7 is currently enabled */ bool crc16_enabled; /* true if crc16 is currently enabled */ + struct wilc_gpios { + struct gpio_desc *enable; /* ENABLE GPIO or NULL */ + struct gpio_desc *reset; /* RESET GPIO or NULL */ + } gpios; }; static const struct wilc_hif_func wilc_hif_spi; @@ -99,8 +106,6 @@ static int wilc_spi_reset(struct wilc *wilc); #define DATA_PKT_LOG_SZ DATA_PKT_LOG_SZ_MAX #define DATA_PKT_SZ (1 << DATA_PKT_LOG_SZ) -#define USE_SPI_DMA 0 - #define WILC_SPI_COMMAND_STAT_SUCCESS 0 #define WILC_GET_RESP_HDR_START(h) (((h) >> 4) & 0xf) @@ -152,6 +157,50 @@ struct wilc_spi_special_cmd_rsp { u8 status; } __packed; +static int wilc_parse_gpios(struct wilc *wilc) +{ + struct spi_device *spi = to_spi_device(wilc->dev); + struct wilc_spi *spi_priv = wilc->bus_data; + struct wilc_gpios *gpios = &spi_priv->gpios; + + /* 
get ENABLE pin and deassert it (if it is defined): */ + gpios->enable = devm_gpiod_get_optional(&spi->dev, + "enable", GPIOD_OUT_LOW); + /* get RESET pin and assert it (if it is defined): */ + if (gpios->enable) { + /* if enable pin exists, reset must exist as well */ + gpios->reset = devm_gpiod_get(&spi->dev, + "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpios->reset)) { + dev_err(&spi->dev, "missing reset gpio.\n"); + return PTR_ERR(gpios->reset); + } + } else { + gpios->reset = devm_gpiod_get_optional(&spi->dev, + "reset", GPIOD_OUT_HIGH); + } + return 0; +} + +static void wilc_wlan_power(struct wilc *wilc, bool on) +{ + struct wilc_spi *spi_priv = wilc->bus_data; + struct wilc_gpios *gpios = &spi_priv->gpios; + + if (on) { + /* assert ENABLE: */ + gpiod_set_value(gpios->enable, 1); + mdelay(5); + /* deassert RESET: */ + gpiod_set_value(gpios->reset, 0); + } else { + /* assert RESET: */ + gpiod_set_value(gpios->reset, 1); + /* deassert ENABLE: */ + gpiod_set_value(gpios->enable, 0); + } +} + static int wilc_bus_probe(struct spi_device *spi) { int ret; @@ -171,6 +220,10 @@ static int wilc_bus_probe(struct spi_device *spi) wilc->bus_data = spi_priv; wilc->dev_irq_num = spi->irq; + ret = wilc_parse_gpios(wilc); + if (ret < 0) + goto netdev_cleanup; + wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc"); if (IS_ERR(wilc->rtc_clk)) { ret = PTR_ERR(wilc->rtc_clk); @@ -190,9 +243,11 @@ free: static int wilc_bus_remove(struct spi_device *spi) { struct wilc *wilc = spi_get_drvdata(spi); + struct wilc_spi *spi_priv = wilc->bus_data; clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); + kfree(spi_priv); return 0; } @@ -203,11 +258,18 @@ static const struct of_device_id wilc_of_match[] = { }; MODULE_DEVICE_TABLE(of, wilc_of_match); +static const struct spi_device_id wilc_spi_id[] = { + { "wilc1000", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, wilc_spi_id); + static struct spi_driver wilc_spi_driver = { .driver = { - .name = MODALIAS, + .name = SPI_MODALIAS, .of_match_table = wilc_of_match, }, + .id_table = wilc_spi_id, .probe = wilc_bus_probe, .remove = wilc_bus_remove, }; @@ -240,7 +302,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len) memset(&msg, 0, sizeof(msg)); spi_message_init(&msg); msg.spi = spi; - msg.is_dma_mapped = USE_SPI_DMA; spi_message_add_tail(&tr, &msg); ret = spi_sync(spi, &msg); @@ -284,7 +345,6 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen) memset(&msg, 0, sizeof(msg)); spi_message_init(&msg); msg.spi = spi; - msg.is_dma_mapped = USE_SPI_DMA; spi_message_add_tail(&tr, &msg); ret = spi_sync(spi, &msg); @@ -323,7 +383,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen) memset(&msg, 0, sizeof(msg)); spi_message_init(&msg); msg.spi = spi; - msg.is_dma_mapped = USE_SPI_DMA; spi_message_add_tail(&tr, &msg); ret = spi_sync(spi, &msg); @@ -977,9 +1036,10 @@ static int wilc_spi_reset(struct wilc *wilc) static int wilc_spi_deinit(struct wilc *wilc) { - /* - * TODO: - */ + struct wilc_spi *spi_priv = wilc->bus_data; + + spi_priv->isinit = false; + wilc_wlan_power(wilc, false); return 0; } @@ -1000,6 +1060,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) dev_err(&spi->dev, "Fail cmd read chip id...\n"); } + wilc_wlan_power(wilc, true); + /* * configure protocol */ diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index ea81ef120fd1..fb5633a05fd5 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ 
b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -20,13 +20,13 @@ static inline bool is_wilc1000(u32 id) static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire) { mutex_lock(&wilc->hif_cs); - if (acquire == WILC_BUS_ACQUIRE_AND_WAKEUP) + if (acquire == WILC_BUS_ACQUIRE_AND_WAKEUP && wilc->power_save_mode) chip_wakeup(wilc); } static inline void release_bus(struct wilc *wilc, enum bus_release release) { - if (release == WILC_BUS_RELEASE_ALLOW_SLEEP) + if (release == WILC_BUS_RELEASE_ALLOW_SLEEP && wilc->power_save_mode) chip_allow_sleep(wilc); mutex_unlock(&wilc->hif_cs); } @@ -626,7 +626,6 @@ void chip_wakeup(struct wilc *wilc) u32 clk_status_val = 0, trials = 0; u32 wakeup_reg, wakeup_bit; u32 clk_status_reg, clk_status_bit; - u32 to_host_from_fw_reg, to_host_from_fw_bit; u32 from_host_to_fw_reg, from_host_to_fw_bit; const struct wilc_hif_func *hif_func = wilc->hif_func; @@ -637,8 +636,6 @@ void chip_wakeup(struct wilc *wilc) clk_status_bit = WILC_SDIO_CLK_STATUS_BIT; from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT; - to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG; - to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT; } else { wakeup_reg = WILC_SPI_WAKEUP_REG; wakeup_bit = WILC_SPI_WAKEUP_BIT; @@ -646,8 +643,6 @@ void chip_wakeup(struct wilc *wilc) clk_status_bit = WILC_SPI_CLK_STATUS_BIT; from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT; - to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG; - to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT; } /* indicate host wakeup */ @@ -1229,7 +1224,8 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif) ret = 0; release: - release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); + /* host comm is disabled - we can't issue sleep command anymore: */ + release_bus(wilc, WILC_BUS_RELEASE_ONLY); return ret; } @@ -1258,7 +1254,7 @@ void wilc_wlan_cleanup(struct net_device *dev) wilc->rx_buffer = NULL; kfree(wilc->tx_buffer); wilc->tx_buffer = NULL; - wilc->hif_func->hif_deinit(NULL); + wilc->hif_func->hif_deinit(wilc); } static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type, @@ -1447,31 +1443,30 @@ release: u32 wilc_get_chipid(struct wilc *wilc, bool update) { - static u32 chipid; - u32 tempchipid = 0; + u32 chipid = 0; u32 rfrevid = 0; - if (chipid == 0 || update) { - wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &tempchipid); + if (wilc->chipid == 0 || update) { + wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &chipid); wilc->hif_func->hif_read_reg(wilc, WILC_RF_REVISION_ID, &rfrevid); - if (!is_wilc1000(tempchipid)) { - chipid = 0; - return chipid; + if (!is_wilc1000(chipid)) { + wilc->chipid = 0; + return wilc->chipid; } - if (tempchipid == WILC_1000_BASE_ID_2A) { /* 0x1002A0 */ + if (chipid == WILC_1000_BASE_ID_2A) { /* 0x1002A0 */ if (rfrevid != 0x1) - tempchipid = WILC_1000_BASE_ID_2A_REV1; - } else if (tempchipid == WILC_1000_BASE_ID_2B) { /* 0x1002B0 */ + chipid = WILC_1000_BASE_ID_2A_REV1; + } else if (chipid == WILC_1000_BASE_ID_2B) { /* 0x1002B0 */ if (rfrevid == 0x4) - tempchipid = WILC_1000_BASE_ID_2B_REV1; + chipid = WILC_1000_BASE_ID_2B_REV1; else if (rfrevid != 0x3) - tempchipid = WILC_1000_BASE_ID_2B_REV2; + chipid = WILC_1000_BASE_ID_2B_REV2; } - chipid = tempchipid; + wilc->chipid = chipid; } - return chipid; + return wilc->chipid; } int wilc_wlan_init(struct net_device *dev) diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h index 13fde636aa0e..eb7978166d73 100644 
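/*
 * The wilc_get_chipid() rewrite above replaces a function-local static
 * cache with per-device state, so two adapters no longer share one cached
 * ID. A reduced model of that cache-in-the-device-struct pattern;
 * read_reg() is a stand-in for the register read.
 */
#include <stdint.h>
#include <stdio.h>

struct dev { uint32_t chipid; };

static uint32_t read_reg(void) { return 0x1002A0; }

static uint32_t get_chipid(struct dev *d, int update)
{
	if (d->chipid == 0 || update)
		d->chipid = read_reg(); /* cached per device, not globally */
	return d->chipid;
}

int main(void)
{
	struct dev a = { 0 }, b = { 0 };

	printf("a=%#x b=%#x\n", get_chipid(&a, 0), get_chipid(&b, 0));
	return 0;
}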
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h +++ b/drivers/net/wireless/microchip/wilc1000/wlan.h @@ -213,8 +213,6 @@ #define WILC_RX_BUFF_SIZE (96 * 1024) #define WILC_TX_BUFF_SIZE (64 * 1024) -#define MODALIAS "WILC_SPI" - #define NQUEUES 4 #define AC_BUFFER_SIZE 1000 diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index e4473a551241..74c3d8cb3100 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status) if (status == -ENODEV || status == -ENOENT) return true; + if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) + return false; + if (status == -EPROTO || status == -ETIMEDOUT) rt2x00dev->num_proto_errs++; else diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index a42e2081b75f..06d59ffb7444 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4859,7 +4859,7 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled */ tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT); - if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { + if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) { tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE); tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE); } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { @@ -4930,7 +4930,7 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, /* * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled */ - if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { + if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) { tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE); tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE); } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 6312fddd9c00..eaba66113328 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) _initpabias(hw); rtl92c_dm_init(hw); exit: + local_irq_disable(); local_irq_restore(flags); return err; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 9b83c710c9b8..51fe51bb0504 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -899,7 +899,7 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) u8 place = chnl; if (chnl > 14) { - for (place = 14; place < sizeof(channel5g); place++) { + for (place = 14; place < ARRAY_SIZE(channel5g); place++) { if (channel5g[place] == chnl) { place++; break; @@ -1366,7 +1366,7 @@ u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl) u8 place; if (chnl > 14) { - for (place = 14; place < sizeof(channel_all); place++) { + for (place = 14; place < ARRAY_SIZE(channel_all); place++) { if (channel_all[place] == chnl) return place - 13; } @@ -2428,7 +2428,7 @@ static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel) int i; - for (i = 0; i < sizeof(channel5g); i++) + for (i = 0; i < 
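/*
 * The rtl8192de hunks above swap sizeof(array) for ARRAY_SIZE(array) as
 * the loop bound. The two only coincide for byte arrays; this sketch
 * shows the oversized bound sizeof() gives for wider elements.
 */
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	uint16_t chans[8] = { 0 };

	printf("sizeof=%zu elements=%zu\n", sizeof(chans), ARRAY_SIZE(chans));
	/* looping to sizeof(chans) (16) would run off the 8-entry array */
	return 0;
}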
ARRAY_SIZE(channel5g); i++) if (channel == channel5g[i]) return true; return false; @@ -2692,9 +2692,8 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw) u8 i; rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "settings regs %d default regs %d\n", - (int)(sizeof(rtlphy->iqk_matrix) / - sizeof(struct iqk_matrix_regs)), + "settings regs %zu default regs %d\n", + ARRAY_SIZE(rtlphy->iqk_matrix), IQK_MATRIX_REG_NUM); /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */ for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { @@ -2861,16 +2860,14 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw) case BAND_ON_5G: /* Get first channel error when change between * 5G and 2.4G band. */ - if (channel <= 14) + if (WARN_ONCE(channel <= 14, "rtl8192de: 5G but channel<=14\n")) return 0; - WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n"); break; case BAND_ON_2_4G: /* Get first channel error when change between * 5G and 2.4G band. */ - if (channel > 14) + if (WARN_ONCE(channel > 14, "rtl8192de: 2G but channel>14\n")) return 0; - WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n"); break; default: WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index aa07856411b1..31f9e9e5c680 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -108,7 +108,6 @@ #define CHANNEL_GROUP_IDX_5GM 6 #define CHANNEL_GROUP_IDX_5GH 9 #define CHANNEL_GROUP_MAX_5G 9 -#define CHANNEL_MAX_NUMBER_2G 14 #define AVG_THERMAL_NUM 8 #define AVG_THERMAL_NUM_88E 4 #define AVG_THERMAL_NUM_8723BE 4 diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index 73d6807a8cdf..834c66ec0af9 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -15,6 +15,7 @@ rtw88_core-y += main.o \ ps.o \ sec.o \ bf.o \ + sar.o \ regd.o rtw88_core-$(CONFIG_PM) += wow.o diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c index aff70e4ae028..df750b3a35e9 100644 --- a/drivers/net/wireless/realtek/rtw88/bf.c +++ b/drivers/net/wireless/realtek/rtw88/bf.c @@ -130,7 +130,8 @@ void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif, BIT_WMAC_USE_NDPARATE | (csi_rsc << 13); - rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING); + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_SOUNDING); rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, 0x26); rtw_write8_clr(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF_REPORT_POLL); rtw_write8_clr(rtwdev, REG_RXFLTMAP4, BIT_RXFLTMAP4_BF_REPORT_POLL); @@ -177,7 +178,7 @@ void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev) void rtw_bf_del_sounding(struct rtw_dev *rtwdev) { - rtw_write8(rtwdev, REG_SND_PTCL_CTRL, 0); + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, 0); } void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif, @@ -204,7 +205,8 @@ void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif, } /* Sounding protocol control */ - rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_SOUNDING); + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_SOUNDING); /* MAC address/Partial AID of Beamformer */ for (i = 0; i < ETH_ALEN; i++) @@ -273,7 +275,8 @@ void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, struct rtw_bf_info *bfinfo = &rtwdev->bf_info; rtw_dbg(rtwdev, RTW_DBG_BF, "remove as a su bfee\n"); - 
rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE); + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_REMOVE); switch (bfee->su_reg_index) { case 0: @@ -298,7 +301,8 @@ void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, { struct rtw_bf_info *bfinfo = &rtwdev->bf_info; - rtw_write8(rtwdev, REG_SND_PTCL_CTRL, RTW_SND_CTRL_REMOVE); + rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, + RTW_SND_CTRL_REMOVE); rtw_bf_del_bfer_entry_mu(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw88/bf.h b/drivers/net/wireless/realtek/rtw88/bf.h index 17855edb5006..7b40c2c03856 100644 --- a/drivers/net/wireless/realtek/rtw88/bf.h +++ b/drivers/net/wireless/realtek/rtw88/bf.h @@ -13,6 +13,9 @@ #define REG_ASSOCIATED_BFMER1_INFO 0x06EC #define REG_TX_CSI_RPT_PARAM_BW20 0x06F4 #define REG_SND_PTCL_CTRL 0x0718 +#define BIT_DIS_CHK_VHTSIGB_CRC BIT(6) +#define BIT_DIS_CHK_VHTSIGA_CRC BIT(5) +#define BIT_MASK_BEAMFORM (GENMASK(4, 0) | BIT(7)) #define REG_MU_TX_CTL 0x14C0 #define REG_MU_STA_GID_VLD 0x14C4 #define REG_MU_STA_USER_POS_INFO 0x14C8 @@ -42,8 +45,8 @@ #define BIT_RXFLTMAP4_BF_REPORT_POLL BIT(4) #define RTW_NDP_RX_STANDBY_TIME 0x70 -#define RTW_SND_CTRL_REMOVE 0xD8 -#define RTW_SND_CTRL_SOUNDING 0xDB +#define RTW_SND_CTRL_REMOVE 0x98 +#define RTW_SND_CTRL_SOUNDING 0x9B enum csi_seg_len { HAL_CSI_SEG_4K = 0, diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 682b23502e6e..e429428232c1 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -152,6 +152,22 @@ static int rtw_debugfs_get_rf_read(struct seq_file *m, void *v) return 0; } +static int rtw_debugfs_get_fix_rate(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 fix_rate = dm_info->fix_rate; + + if (fix_rate >= DESC_RATE_MAX) { + seq_printf(m, "Fix rate disabled, fix_rate = %u\n", fix_rate); + return 0; + } + + seq_printf(m, "Data frames fixed at desc rate %u\n", fix_rate); + return 0; +} + static int rtw_debugfs_copy_from_user(char tmp[], int size, const char __user *buffer, size_t count, int num) @@ -437,6 +453,31 @@ static ssize_t rtw_debugfs_set_rf_read(struct file *filp, return count; } +static ssize_t rtw_debugfs_set_fix_rate(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 fix_rate; + char tmp[32 + 1]; + int ret; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); + + ret = kstrtou8(tmp, 0, &fix_rate); + if (ret) { + rtw_warn(rtwdev, "invalid args, [rate]\n"); + return ret; + } + + dm_info->fix_rate = fix_rate; + + return count; +} + static int rtw_debug_get_mac_page(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; @@ -590,9 +631,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) u8 ch = hal->current_channel; u8 regd = rtw_regd_get(rtwdev); + seq_printf(m, "channel: %u\n", ch); + seq_printf(m, "bandwidth: %u\n", bw); seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd)); - seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n", - "path", "rate", "pwr", "", "base", "", "byr", "lmt", "rem"); + seq_printf(m, 
"%-4s %-10s %-9s %-9s (%-4s %-4s %-4s) %-4s\n", + "path", "rate", "pwr", "base", "byr", "lmt", "sar", "rem"); mutex_lock(&hal->tx_power_mutex); for (path = RF_PATH_A; path <= RF_PATH_B; path++) { @@ -614,13 +657,15 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) seq_printf(m, "%4c ", path + 'A'); rtw_print_rate(m, rate); - seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d) %4d\n", + seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d %4d) %4d\n", hal->tx_pwr_tbl[path][rate], hal->tx_pwr_tbl[path][rate], pwr_param.pwr_base, - min_t(s8, pwr_param.pwr_offset, - pwr_param.pwr_limit), + min3(pwr_param.pwr_offset, + pwr_param.pwr_limit, + pwr_param.pwr_sar), pwr_param.pwr_offset, pwr_param.pwr_limit, + pwr_param.pwr_sar, pwr_param.pwr_remnant); } } @@ -904,6 +949,39 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v) return 0; } +static ssize_t rtw_debugfs_set_force_lowest_basic_rate(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + bool input; + int err; + + err = kstrtobool_from_user(buffer, count, &input); + if (err) + return err; + + if (input) + set_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + else + clear_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + + return count; +} + +static int rtw_debugfs_get_force_lowest_basic_rate(struct seq_file *m, void *v) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + + seq_printf(m, "force lowest basic rate: %d\n", + test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags)); + + return 0; +} + static ssize_t rtw_debugfs_set_dm_cap(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) @@ -1061,6 +1139,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_read_reg = { .cb_read = rtw_debugfs_get_read_reg, }; +static struct rtw_debugfs_priv rtw_debug_priv_fix_rate = { + .cb_write = rtw_debugfs_set_fix_rate, + .cb_read = rtw_debugfs_get_fix_rate, +}; + static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = { .cb_write = rtw_debugfs_set_single_input, .cb_read = rtw_debugfs_get_dump_cam, @@ -1094,6 +1177,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { .cb_read = rtw_debugfs_get_fw_crash, }; +static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = { + .cb_write = rtw_debugfs_set_force_lowest_basic_rate, + .cb_read = rtw_debugfs_get_force_lowest_basic_rate, +}; + static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { .cb_write = rtw_debugfs_set_dm_cap, .cb_read = rtw_debugfs_get_dm_cap, @@ -1126,6 +1214,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_rw(read_reg); rtw_debugfs_add_w(rf_write); rtw_debugfs_add_rw(rf_read); + rtw_debugfs_add_rw(fix_rate); rtw_debugfs_add_rw(dump_cam); rtw_debugfs_add_rw(rsvd_page); rtw_debugfs_add_r(phy_info); @@ -1174,6 +1263,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_r(tx_pwr_tbl); rtw_debugfs_add_rw(edcca_enable); rtw_debugfs_add_rw(fw_crash); + rtw_debugfs_add_rw(force_lowest_basic_rate); rtw_debugfs_add_rw(dm_cap); } diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index 47c57f395f52..61f8369fe2d6 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -22,6 +22,7 @@ enum rtw_debug_mask { RTW_DBG_CFO = 0x00002000, RTW_DBG_PATH_DIV = 
0x00004000, RTW_DBG_ADAPTIVITY = 0x00008000, + RTW_DBG_HW_SCAN = 0x00010000, RTW_DBG_ALL = 0xffffffff }; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 0c4f2a2f2d7f..2f7c036f9022 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -28,6 +28,12 @@ static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, case C2H_CCX_RPT: rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT); break; + case C2H_SCAN_STATUS_RPT: + rtw_hw_scan_status_report(rtwdev, skb); + break; + case C2H_CHAN_SWITCH: + rtw_hw_scan_chan_switch(rtwdev, skb); + break; default: break; } @@ -1777,3 +1783,385 @@ void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start) rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } + +static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb, + struct sk_buff_head *list, + struct rtw_vif *rtwvif) +{ + struct ieee80211_scan_ies *ies = rtwvif->scan_ies; + struct rtw_chip_info *chip = rtwdev->chip; + struct sk_buff *new; + u8 idx; + + for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { + if (!(BIT(idx) & chip->band)) + continue; + new = skb_copy(skb, GFP_KERNEL); + skb_put_data(new, ies->ies[idx], ies->len[idx]); + skb_put_data(new, ies->common_ies, ies->common_ie_len); + skb_queue_tail(list, new); + } +} + +static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids, + struct sk_buff_head *probe_req_list) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct sk_buff *skb, *tmp; + u8 page_offset = 1, *buf, page_size = chip->page_size; + u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT; + u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc; + u16 buf_offset = page_size * page_offset; + u8 tx_desc_sz = chip->tx_pkt_desc_sz; + unsigned int pkt_len; + int ret; + + buf = kzalloc(page_size * pages, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf_offset -= tx_desc_sz; + skb_queue_walk_safe(probe_req_list, skb, tmp) { + skb_unlink(skb, probe_req_list); + rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ); + if (skb->len > page_size * RTW_PROBE_PG_CNT) { + ret = -EINVAL; + goto out; + } + + memcpy(buf + buf_offset, skb->data, skb->len); + pkt_len = skb->len - tx_desc_sz; + loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset; + __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc); + + buf_offset += RTW_PROBE_PG_CNT * page_size; + page_offset += RTW_PROBE_PG_CNT; + kfree_skb(skb); + } + + ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset); + if (ret) { + rtw_err(rtwdev, "Download probe request to firmware failed\n"); + goto out; + } + + rtwdev->scan_info.probe_pg_size = page_offset; +out: + kfree(buf); + + return ret; +} + +static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif) +{ + struct cfg80211_scan_request *req = rtwvif->scan_req; + struct sk_buff_head list; + struct sk_buff *skb; + u8 num = req->n_ssids, i; + + skb_queue_head_init(&list); + for (i = 0; i < num; i++) { + skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, + req->ssids[i].ssid, + req->ssids[i].ssid_len, + req->ie_len); + rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif); + kfree_skb(skb); + } + + return _rtw_hw_scan_update_probe_req(rtwdev, num, &list); +} + +static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info, + struct rtw_chan_list *list, u8 *buf) +{ + u8 *chan = &buf[list->size]; + u8 info_size = RTW_CH_INFO_SIZE; + + if (list->size > list->buf_size) + return -ENOMEM; + + 
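The fix_rate debugfs knob added above encodes "disabled" as a sentinel: any value of at least DESC_RATE_MAX leaves rate selection alone (the patch initializes it to U8_MAX elsewhere), and the TX path applies the override only when the value is a valid descriptor rate, so no separate "enabled" flag is needed. A plain C demo of the pattern; the DESC_RATE_MAX value and apply_fix_rate() are illustrative stand-ins, not driver symbols:

```c
#include <stdint.h>
#include <stdio.h>

#define DESC_RATE_MAX 84	/* stand-in for the driver's enum bound */

/* Sentinel pattern: "disabled" is any out-of-range value (U8_MAX at
 * init), so a single u8 carries both the on/off state and the rate.
 */
static void apply_fix_rate(uint8_t fix_rate, uint8_t *rate,
			   int *dis_rate_fallback)
{
	if (fix_rate < DESC_RATE_MAX) {	/* valid index => override */
		*rate = fix_rate;
		*dis_rate_fallback = 1;	/* keep fw from re-picking */
	}
}

int main(void)
{
	uint8_t rate = 11;
	int dis_fb = 0;

	apply_fix_rate(UINT8_MAX, &rate, &dis_fb);	/* disabled: no change */
	printf("rate=%u fallback_disabled=%d\n", (unsigned)rate, dis_fb);

	apply_fix_rate(7, &rate, &dis_fb);		/* fixed at rate 7 */
	printf("rate=%u fallback_disabled=%d\n", (unsigned)rate, dis_fb);
	return 0;
}
```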
CH_INFO_SET_CH(chan, info->channel); + CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx); + CH_INFO_SET_BW(chan, info->bw); + CH_INFO_SET_TIMEOUT(chan, info->timeout); + CH_INFO_SET_ACTION_ID(chan, info->action_id); + CH_INFO_SET_EXTRA_INFO(chan, info->extra_info); + if (info->extra_info) { + EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS); + EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN); + EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE - + RTW_EX_CH_INFO_HDR_SIZE); + EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME); + info_size += RTW_EX_CH_INFO_SIZE; + } + list->size += info_size; + list->ch_num++; + + return 0; +} + +static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + struct rtw_chan_list *list, u8 *buf) +{ + struct cfg80211_scan_request *req = rtwvif->scan_req; + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + struct ieee80211_channel *channel; + int i, ret = 0; + + for (i = 0; i < req->n_channels; i++) { + struct rtw_chan_info ch_info = {0}; + + channel = req->channels[i]; + ch_info.channel = channel->hw_value; + ch_info.bw = RTW_SCAN_WIDTH; + ch_info.pri_ch_idx = RTW_PRI_CH_IDX; + ch_info.timeout = req->duration_mandatory ? + req->duration : RTW_CHANNEL_TIME; + + if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) { + ch_info.action_id = RTW_CHANNEL_RADAR; + ch_info.extra_info = 1; + /* Overwrite duration for passive scans if necessary */ + ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ? + ch_info.timeout : RTW_PASS_CHAN_TIME; + } else { + ch_info.action_id = RTW_CHANNEL_ACTIVE; + } + + ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf); + if (ret) + return ret; + } + + if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) { + rtw_err(rtwdev, "List exceeds rsvd page total size\n"); + return -EINVAL; + } + + list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size; + ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size); + if (ret) + rtw_err(rtwdev, "Download channel list failed\n"); + + return ret; +} + +static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev, + struct rtw_ch_switch_option *opt, + struct rtw_vif *rtwvif, + struct rtw_chan_list *list) +{ + struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; + struct cfg80211_scan_request *req = rtwvif->scan_req; + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + /* reserve one dummy page at the beginning for tx descriptor */ + u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1; + bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; + + rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD); + SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN); + + SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en); + SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en); + SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq); + SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck); + SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num); + SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size); + SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary); + SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan); + SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx); + SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw); + SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port); + SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ? 
+ req->duration : RTW_CHANNEL_TIME); + SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME); + SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids); + SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc); + + rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); +} + +void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req) +{ + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct cfg80211_scan_request *req = &scan_req->req; + u8 mac_addr[ETH_ALEN]; + + rtwdev->scan_info.scanning_vif = vif; + rtwvif->scan_ies = &scan_req->ies; + rtwvif->scan_req = req; + + ieee80211_stop_queues(rtwdev->hw); + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + get_random_mask_addr(mac_addr, req->mac_addr, + req->mac_addr_mask); + else + ether_addr_copy(mac_addr, vif->addr); + + rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true); + + rtwdev->hal.rcr &= ~BIT_CBSSID_BCN; + rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); +} + +void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool aborted) +{ + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + struct rtw_vif *rtwvif; + + if (!vif) + return; + + rtwdev->hal.rcr |= BIT_CBSSID_BCN; + rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); + + rtw_core_scan_complete(rtwdev, vif); + + ieee80211_wake_queues(rtwdev->hw); + ieee80211_scan_completed(rtwdev->hw, &info); + + rtwvif = (struct rtw_vif *)vif->drv_priv; + rtwvif->scan_req = NULL; + rtwvif->scan_ies = NULL; + rtwdev->scan_info.scanning_vif = NULL; +} + +static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + struct rtw_chan_list *list) +{ + struct cfg80211_scan_request *req = rtwvif->scan_req; + int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE); + u8 *buf; + int ret; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif); + if (ret) { + rtw_err(rtwdev, "Update probe request failed\n"); + goto out; + } + + list->buf_size = size; + list->size = 0; + list->ch_num = 0; + ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf); +out: + kfree(buf); + + return ret; +} + +int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool enable) +{ + struct rtw_vif *rtwvif = vif ? 
(struct rtw_vif *)vif->drv_priv : NULL; + struct rtw_ch_switch_option cs_option = {0}; + struct rtw_chan_list chan_list = {0}; + int ret = 0; + + if (!rtwvif) + return -EINVAL; + + cs_option.switch_en = enable; + cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED; + if (enable) { + ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list); + if (ret) + goto out; + } + rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list); +out: + return ret; +} + +void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) +{ + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + return; + + rtw_hw_scan_offload(rtwdev, vif, false); + rtw_hw_scan_complete(rtwdev, vif, true); +} + +void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb) +{ + struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; + struct rtw_c2h_cmd *c2h; + bool aborted; + u8 rc; + + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + return; + + c2h = get_c2h_from_skb(skb); + rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload); + aborted = rc != RTW_SCAN_REPORT_SUCCESS; + rtw_hw_scan_complete(rtwdev, vif, aborted); + + if (aborted) + rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc); +} + +void rtw_store_op_chan(struct rtw_dev *rtwdev) +{ + struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; + struct rtw_hal *hal = &rtwdev->hal; + + scan_info->op_chan = hal->current_channel; + scan_info->op_bw = hal->current_band_width; + scan_info->op_pri_ch_idx = hal->current_primary_channel_index; +} + +static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel) +{ + struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; + + return channel == scan_info->op_chan; +} + +void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb) +{ + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_c2h_cmd *c2h; + enum rtw_scan_notify_id id; + u8 chan, status; + + c2h = get_c2h_from_skb(skb); + chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload); + id = GET_CHAN_SWITCH_ID(c2h->payload); + status = GET_CHAN_SWITCH_STATUS(c2h->payload); + + if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) { + if (rtw_is_op_chan(rtwdev, chan)) + ieee80211_wake_queues(rtwdev->hw); + hal->current_channel = chan; + hal->current_band_type = chan > 14 ? 
RTW_BAND_5G : RTW_BAND_2G; + } else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) { + if (IS_CH_5G_BAND(chan)) { + rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); + } else if (IS_CH_2G_BAND(chan)) { + u8 chan_type; + + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + chan_type = COEX_SWITCH_TO_24G; + else + chan_type = COEX_SWITCH_TO_24G_NOFORSCAN; + rtw_coex_switchband_notify(rtwdev, chan_type); + } + if (rtw_is_op_chan(rtwdev, chan)) + ieee80211_stop_queues(rtwdev->hw); + } + + rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, + "Chan switch: %x, id: %x, status: %x\n", chan, id, status); +} diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 09c7afb99e63..654c3c2e5721 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -32,6 +32,17 @@ #define SCAN_NOTIFY_TIMEOUT msecs_to_jiffies(10) +#define RTW_CHANNEL_TIME 45 +#define RTW_OFF_CHAN_TIME 100 +#define RTW_PASS_CHAN_TIME 105 +#define RTW_DFS_CHAN_TIME 20 +#define RTW_CH_INFO_SIZE 4 +#define RTW_EX_CH_INFO_SIZE 3 +#define RTW_EX_CH_INFO_HDR_SIZE 2 +#define RTW_SCAN_WIDTH 0 +#define RTW_PRI_CH_IDX 1 +#define RTW_PROBE_PG_CNT 2 + enum rtw_c2h_cmd_id { C2H_CCX_TX_RPT = 0x03, C2H_BT_INFO = 0x09, @@ -48,7 +59,9 @@ enum rtw_c2h_cmd_id { }; enum rtw_c2h_cmd_id_ext { + C2H_SCAN_STATUS_RPT = 0x3, C2H_CCX_RPT = 0x0f, + C2H_CHAN_SWITCH = 0x22, }; struct rtw_c2h_cmd { @@ -98,9 +111,11 @@ enum rtw_fw_feature { FW_FEATURE_LPS_C2H = BIT(1), FW_FEATURE_LCLK = BIT(2), FW_FEATURE_PG = BIT(3), + FW_FEATURE_TX_WAKE = BIT(4), FW_FEATURE_BCN_FILTER = BIT(5), FW_FEATURE_NOTIFY_SCAN = BIT(6), FW_FEATURE_ADAPTIVITY = BIT(7), + FW_FEATURE_SCAN_OFFLOAD = BIT(8), FW_FEATURE_MAX = BIT(31), }; @@ -196,6 +211,51 @@ struct rtw_fw_wow_disconnect_para { u8 retry_count; }; +enum rtw_channel_type { + RTW_CHANNEL_PASSIVE, + RTW_CHANNEL_ACTIVE, + RTW_CHANNEL_RADAR, +}; + +enum rtw_scan_extra_id { + RTW_SCAN_EXTRA_ID_DFS, +}; + +enum rtw_scan_extra_info { + RTW_SCAN_EXTRA_ACTION_SCAN, +}; + +enum rtw_scan_report_code { + RTW_SCAN_REPORT_SUCCESS = 0x00, + RTW_SCAN_REPORT_ERR_PHYDM = 0x01, + RTW_SCAN_REPORT_ERR_ID = 0x02, + RTW_SCAN_REPORT_ERR_TX = 0x03, + RTW_SCAN_REPORT_CANCELED = 0x10, + RTW_SCAN_REPORT_CANCELED_EXT = 0x11, + RTW_SCAN_REPORT_FW_DISABLED = 0xF0, +}; + +enum rtw_scan_notify_id { + RTW_SCAN_NOTIFY_ID_PRESWITCH = 0x00, + RTW_SCAN_NOTIFY_ID_POSTSWITCH = 0x01, + RTW_SCAN_NOTIFY_ID_PROBE_PRETX = 0x02, + RTW_SCAN_NOTIFY_ID_PROBE_ISSUETX = 0x03, + RTW_SCAN_NOTIFY_ID_NULL0_PRETX = 0x04, + RTW_SCAN_NOTIFY_ID_NULL0_ISSUETX = 0x05, + RTW_SCAN_NOTIFY_ID_NULL0_POSTTX = 0x06, + RTW_SCAN_NOTIFY_ID_NULL1_PRETX = 0x07, + RTW_SCAN_NOTIFY_ID_NULL1_ISSUETX = 0x08, + RTW_SCAN_NOTIFY_ID_NULL1_POSTTX = 0x09, + RTW_SCAN_NOTIFY_ID_DWELLEXT = 0x0A, +}; + +enum rtw_scan_notify_status { + RTW_SCAN_NOTIFY_STATUS_SUCCESS = 0x00, + RTW_SCAN_NOTIFY_STATUS_FAILURE = 0x01, + RTW_SCAN_NOTIFY_STATUS_RESOURCE = 0x02, + RTW_SCAN_NOTIFY_STATUS_TIMEOUT = 0x03, +}; + struct rtw_ch_switch_option { u8 periodic_option; u32 tsf_high; @@ -209,6 +269,8 @@ struct rtw_ch_switch_option { u8 slow_period; u8 slow_period_sel; u8 nlo_en; + bool switch_en; + bool back_op_en; }; struct rtw_fw_hdr { @@ -265,6 +327,11 @@ struct rtw_fw_hdr_legacy { #define GET_CCX_REPORT_SEQNUM_V1(c2h_payload) (c2h_payload[8] & 0xfc) #define GET_CCX_REPORT_STATUS_V1(c2h_payload) (c2h_payload[9] & 0xc0) +#define GET_SCAN_REPORT_RETURN_CODE(c2h_payload) (c2h_payload[2] & 0xff) + +#define GET_CHAN_SWITCH_CENTRAL_CH(c2h_payload) (c2h_payload[2]) +#define 
GET_CHAN_SWITCH_ID(c2h_payload) (c2h_payload[3]) +#define GET_CHAN_SWITCH_STATUS(c2h_payload) (c2h_payload[4]) #define GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f) #define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7) #define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6]) @@ -284,6 +351,7 @@ struct rtw_fw_hdr_legacy { #define H2C_PKT_CH_SWITCH 0x02 #define H2C_PKT_UPDATE_PKT 0x0C +#define H2C_PKT_SCAN_OFFLOAD 0x19 #define H2C_PKT_CH_SWITCH_LEN 0x20 #define H2C_PKT_UPDATE_PKT_LEN 0x4 @@ -334,6 +402,30 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(23, 16)) #define CHSW_INFO_SET_ACTION_ID(pkt, value) \ le32p_replace_bits((__le32 *)(pkt) + 0x00, value, GENMASK(30, 24)) +#define CHSW_INFO_SET_EXTRA_INFO(pkt, value) \ + le32p_replace_bits((__le32 *)(pkt) + 0x00, value, BIT(31)) + +#define CH_INFO_SET_CH(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x00, value, GENMASK(7, 0)) +#define CH_INFO_SET_PRI_CH_IDX(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x01, value, GENMASK(3, 0)) +#define CH_INFO_SET_BW(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x01, value, GENMASK(7, 4)) +#define CH_INFO_SET_TIMEOUT(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x02, value, GENMASK(7, 0)) +#define CH_INFO_SET_ACTION_ID(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x03, value, GENMASK(6, 0)) +#define CH_INFO_SET_EXTRA_INFO(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x03, value, BIT(7)) + +#define EXTRA_CH_INFO_SET_ID(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x04, value, GENMASK(6, 0)) +#define EXTRA_CH_INFO_SET_INFO(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x04, value, BIT(7)) +#define EXTRA_CH_INFO_SET_SIZE(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x05, value, GENMASK(7, 0)) +#define EXTRA_CH_INFO_SET_DFS_EXT_TIME(pkt, value) \ + u8p_replace_bits((u8 *)(pkt) + 0x06, value, GENMASK(7, 0)) #define UPDATE_PKT_SET_SIZE(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 0)) @@ -350,12 +442,18 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2)) #define CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(4, 3)) +#define CH_SWITCH_SET_SCAN_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(5)) +#define CH_SWITCH_SET_BACK_OP_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(6)) #define CH_SWITCH_SET_INFO_LOC(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8)) #define CH_SWITCH_SET_CH_NUM(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16)) #define CH_SWITCH_SET_PRI_CH_IDX(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24)) +#define CH_SWITCH_SET_DEST_BW(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 28)) #define CH_SWITCH_SET_DEST_CH(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0)) #define CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, value) \ @@ -375,6 +473,41 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define CH_SWITCH_SET_INFO_SIZE(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x06, value, GENMASK(15, 0)) +#define SCAN_OFFLOAD_SET_START(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0)) +#define 
SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1)) +#define SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(2)) +#define SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(3)) +#define SCAN_OFFLOAD_SET_VERBOSE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(4)) +#define SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8)) +#define SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 16)) +#define SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(7, 0)) +#define SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(15, 8)) +#define SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(19, 16)) +#define SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(23, 20)) +#define SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x03, value, GENMASK(26, 24)) +#define SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(15, 0)) +#define SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x04, value, GENMASK(31, 16)) +#define SCAN_OFFLOAD_SET_MODE(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(3, 0)) +#define SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(7, 4)) +#define SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, value) \ + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x05, value, GENMASK(15, 8)) + /* Command H2C */ #define H2C_CMD_RSVD_PAGE 0x0 #define H2C_CMD_MEDIA_STATUS_RPT 0x01 @@ -686,4 +819,14 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, u32 *buffer); void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start); void rtw_fw_adaptivity(struct rtw_dev *rtwdev); +void rtw_store_op_chan(struct rtw_dev *rtwdev); +void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req); +void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool aborted); +int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, + bool enable); +void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb); +void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb); +void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif); #endif diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 6f5629852416..ae7d97de5fdf 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -13,6 +13,7 @@ #include "bf.h" #include "debug.h" #include "wow.h" +#include "sar.h" static void rtw_ops_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, @@ -161,6 +162,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw, rtwvif->stats.rx_unicast = 0; rtwvif->stats.tx_cnt = 0; rtwvif->stats.rx_cnt = 0; + rtwvif->scan_req = NULL; memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee)); rtwvif->conf = 
&rtw_vif_port[port]; rtw_txq_init(rtwdev, vif->txq); @@ -372,9 +374,15 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, rtw_coex_media_status_notify(rtwdev, conf->assoc); if (rtw_bf_support) rtw_bf_assoc(rtwdev, vif, conf); + rtw_store_op_chan(rtwdev); } else { rtw_leave_lps(rtwdev); rtw_bf_disassoc(rtwdev, vif, conf); + /* Abort ongoing scan if cancel_scan isn't issued + * when disconnected by peer + */ + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + rtw_hw_scan_abort(rtwdev, vif); } config |= PORT_SET_NET_TYPE; @@ -594,22 +602,9 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw, { struct rtw_dev *rtwdev = hw->priv; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; - u32 config = 0; mutex_lock(&rtwdev->mutex); - - rtw_leave_lps(rtwdev); - - ether_addr_copy(rtwvif->mac_addr, mac_addr); - config |= PORT_SET_MAC_ADDR; - rtw_vif_port_config(rtwdev, rtwvif, config); - - rtw_coex_scan_notify(rtwdev, COEX_SCAN_START); - rtw_core_fw_scan_notify(rtwdev, true); - - set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); - set_bit(RTW_FLAG_SCANNING, rtwdev->flags); - + rtw_core_scan_start(rtwdev, rtwvif, mac_addr, false); mutex_unlock(&rtwdev->mutex); } @@ -617,22 +612,9 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = hw->priv; - struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; - u32 config = 0; mutex_lock(&rtwdev->mutex); - - clear_bit(RTW_FLAG_SCANNING, rtwdev->flags); - clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); - - rtw_core_fw_scan_notify(rtwdev, false); - - ether_addr_copy(rtwvif->mac_addr, vif->addr); - config |= PORT_SET_MAC_ADDR; - rtw_vif_port_config(rtwdev, rtwvif, config); - - rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); - + rtw_core_scan_complete(rtwdev, vif); mutex_unlock(&rtwdev->mutex); } @@ -815,6 +797,56 @@ static void rtw_reconfig_complete(struct ieee80211_hw *hw, mutex_unlock(&rtwdev->mutex); } +static int rtw_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct rtw_dev *rtwdev = hw->priv; + int ret; + + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + return 1; + + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + return -EBUSY; + + mutex_lock(&rtwdev->mutex); + rtw_hw_scan_start(rtwdev, vif, req); + ret = rtw_hw_scan_offload(rtwdev, vif, true); + if (ret) { + rtw_hw_scan_abort(rtwdev, vif); + rtw_err(rtwdev, "HW scan failed with status: %d\n", ret); + } + mutex_unlock(&rtwdev->mutex); + + return ret; +} + +static void rtw_ops_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = hw->priv; + + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + return; + + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + return; + + mutex_lock(&rtwdev->mutex); + rtw_hw_scan_abort(rtwdev, vif); + mutex_unlock(&rtwdev->mutex); +} + +static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct rtw_dev *rtwdev = hw->priv; + + rtw_set_sar_specs(rtwdev, sar); + + return 0; +} + const struct ieee80211_ops rtw_ops = { .tx = rtw_ops_tx, .wake_tx_queue = rtw_ops_wake_tx_queue, @@ -842,6 +874,9 @@ const struct ieee80211_ops rtw_ops = { .set_antenna = rtw_ops_set_antenna, .get_antenna = rtw_ops_get_antenna, .reconfig_complete = rtw_reconfig_complete, + .hw_scan = rtw_ops_hw_scan, + .cancel_hw_scan = rtw_ops_cancel_hw_scan, + .set_sar_specs = rtw_ops_set_sar_specs, #ifdef CONFIG_PM .suspend = 
rtw_ops_suspend, .resume = rtw_ops_resume, diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index a0d4d6e31fb4..38252113c4a8 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -17,6 +17,7 @@ #include "tx.h" #include "debug.h" #include "bf.h" +#include "sar.h" bool rtw_disable_lps_deep_mode; EXPORT_SYMBOL(rtw_disable_lps_deep_mode); @@ -637,6 +638,19 @@ static void rtw_txq_ba_work(struct work_struct *work) rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data); } +void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel) +{ + if (IS_CH_2G_BAND(channel)) + pkt_stat->band = NL80211_BAND_2GHZ; + else if (IS_CH_5G_BAND(channel)) + pkt_stat->band = NL80211_BAND_5GHZ; + else + return; + + pkt_stat->freq = ieee80211_channel_to_frequency(channel, pkt_stat->band); +} +EXPORT_SYMBOL(rtw_set_rx_freq_band); + void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *chan_params) { @@ -735,8 +749,28 @@ void rtw_set_channel(struct rtw_dev *rtwdev) hal->current_band_width = bandwidth; hal->current_channel = center_chan; + hal->current_primary_channel_index = primary_chan_idx; hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G; + switch (center_chan) { + case 1 ... 14: + hal->sar_band = RTW_SAR_BAND_0; + break; + case 36 ... 64: + hal->sar_band = RTW_SAR_BAND_1; + break; + case 100 ... 144: + hal->sar_band = RTW_SAR_BAND_3; + break; + case 149 ... 177: + hal->sar_band = RTW_SAR_BAND_4; + break; + default: + WARN(1, "unknown ch(%u) to SAR band\n", center_chan); + hal->sar_band = RTW_SAR_BAND_0; + break; + } + for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++) hal->cch_by_bw[i] = ch_param.cch_by_bw[i]; @@ -1278,6 +1312,50 @@ void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start) } } +void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + const u8 *mac_addr, bool hw_scan) +{ + u32 config = 0; + int ret = 0; + + rtw_leave_lps(rtwdev); + + if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) { + ret = rtw_leave_ips(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to leave idle state\n"); + return; + } + } + + ether_addr_copy(rtwvif->mac_addr, mac_addr); + config |= PORT_SET_MAC_ADDR; + rtw_vif_port_config(rtwdev, rtwvif, config); + + rtw_coex_scan_notify(rtwdev, COEX_SCAN_START); + rtw_core_fw_scan_notify(rtwdev, true); + + set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); + set_bit(RTW_FLAG_SCANNING, rtwdev->flags); +} + +void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) +{ + struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + u32 config = 0; + + clear_bit(RTW_FLAG_SCANNING, rtwdev->flags); + clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); + + rtw_core_fw_scan_notify(rtwdev, false); + + ether_addr_copy(rtwvif->mac_addr, vif->addr); + config |= PORT_SET_MAC_ADDR; + rtw_vif_port_config(rtwdev, rtwvif, config); + + rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); +} + int rtw_core_start(struct rtw_dev *rtwdev) { int ret; @@ -1862,13 +1940,14 @@ int rtw_core_init(struct rtw_dev *rtwdev) rtwdev->sec.total_cam_num = 32; rtwdev->hal.current_channel = 1; + rtwdev->dm_info.fix_rate = U8_MAX; set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map); rtw_stats_init(rtwdev); /* default rx filter setting */ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV | - BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | + BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | BIT_AB | BIT_AM | BIT_APM; ret = 
rtw_load_firmware(rtwdev, RTW_NORMAL_FW); @@ -1951,6 +2030,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -1963,8 +2043,12 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) WIPHY_FLAG_TDLS_EXTERNAL_SETUP; hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS; + hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); #ifdef CONFIG_PM hw->wiphy->wowlan = rtwdev->chip->wowlan_stub; @@ -1973,6 +2057,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) rtw_set_supported_band(hw, rtwdev->chip); SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr); + hw->wiphy->sar_capa = &rtw_sar_capa; + ret = rtw_regd_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to init regd\n"); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index bbdd535b64e7..dc1cd9bd4b8a 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -22,6 +22,9 @@ #define RTW_MAX_SEC_CAM_NUM 32 #define MAX_PG_CAM_BACKUP_NUM 8 +#define RTW_SCAN_MAX_SSIDS 4 +#define RTW_SCAN_MAX_IE_LEN 128 + #define RTW_MAX_PATTERN_NUM 12 #define RTW_MAX_PATTERN_MASK_SIZE 16 #define RTW_MAX_PATTERN_SIZE 128 @@ -81,11 +84,9 @@ struct rtw_hci { IS_CH_5G_BAND_3(channel) || IS_CH_5G_BAND_4(channel)) enum rtw_supported_band { - RTW_BAND_2G = 1 << 0, - RTW_BAND_5G = 1 << 1, - RTW_BAND_60G = 1 << 2, - - RTW_BAND_MAX, + RTW_BAND_2G = BIT(NL80211_BAND_2GHZ), + RTW_BAND_5G = BIT(NL80211_BAND_5GHZ), + RTW_BAND_60G = BIT(NL80211_BAND_60GHZ), }; /* now, support up to 80M bw */ @@ -364,6 +365,7 @@ enum rtw_flags { RTW_FLAG_WOWLAN, RTW_FLAG_RESTARTING, RTW_FLAG_RESTART_TRIGGERING, + RTW_FLAG_FORCE_LOWEST_RATE, NUM_OF_RTW_FLAGS, }; @@ -629,6 +631,8 @@ struct rtw_rx_pkt_stat { s8 rx_snr[RTW_RF_PATH_MAX]; u8 rx_evm[RTW_RF_PATH_MAX]; s8 cfo_tail[RTW_RF_PATH_MAX]; + u16 freq; + u8 band; struct rtw_sta_info *si; struct ieee80211_vif *vif; @@ -799,6 +803,8 @@ struct rtw_vif { struct list_head rsvd_page_list; struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS]; const struct rtw_vif_port *conf; + struct cfg80211_scan_request *scan_req; + struct ieee80211_scan_ies *scan_ies; struct rtw_traffic_stats stats; @@ -1630,6 +1636,7 @@ struct rtw_dm_info { u8 cck_gi_u_bnd; u8 cck_gi_l_bnd; + u8 fix_rate; u8 tx_rate; u32 rrsr_val_init; u32 rrsr_mask_min; @@ -1806,6 +1813,33 @@ struct rtw_fw_state { u32 feature; }; +enum rtw_sar_sources { + RTW_SAR_SOURCE_NONE, + RTW_SAR_SOURCE_COMMON, +}; + +enum rtw_sar_bands { + RTW_SAR_BAND_0, + RTW_SAR_BAND_1, + /* RTW_SAR_BAND_2, not used now */ + RTW_SAR_BAND_3, + RTW_SAR_BAND_4, + + RTW_SAR_BAND_NR, +}; + +/* the union is reserved for other kinds of SAR sources + * which might not re-use same format with array common.
+ */ +union rtw_sar_cfg { + s8 common[RTW_SAR_BAND_NR]; +}; + +struct rtw_sar { + enum rtw_sar_sources src; + union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_MAX]; +}; + struct rtw_hal { u32 rcr; @@ -1817,6 +1851,7 @@ struct rtw_hal { u8 ps_mode; u8 current_channel; + u8 current_primary_channel_index; u8 current_band_width; u8 current_band_type; @@ -1853,6 +1888,9 @@ struct rtw_hal { [RTW_MAX_CHANNEL_NUM_5G]; s8 tx_pwr_tbl[RTW_RF_PATH_MAX] [DESC_RATE_MAX]; + + enum rtw_sar_bands sar_band; + struct rtw_sar sar; }; struct rtw_path_div { @@ -1863,12 +1901,37 @@ struct rtw_path_div { u16 path_b_cnt; }; +struct rtw_chan_info { + int pri_ch_idx; + int action_id; + int bw; + u8 extra_info; + u8 channel; + u16 timeout; +}; + +struct rtw_chan_list { + u32 buf_size; + u32 ch_num; + u32 size; + u16 addr; +}; + +struct rtw_hw_scan_info { + struct ieee80211_vif *scanning_vif; + u8 probe_pg_size; + u8 op_pri_ch_idx; + u8 op_chan; + u8 op_bw; +}; + struct rtw_dev { struct ieee80211_hw *hw; struct device *dev; struct rtw_hci hci; + struct rtw_hw_scan_info scan_info; struct rtw_chip_info *chip; struct rtw_hal hal; struct rtw_fifo_conf fifo; @@ -2021,6 +2084,7 @@ static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev) return 0; } +void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel); void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *ch_param); bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); @@ -2035,6 +2099,9 @@ void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u32 config); void rtw_tx_report_purge_timer(struct timer_list *t); void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); +void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, + const u8 *mac_addr, bool hw_scan); +void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif); int rtw_core_start(struct rtw_dev *rtwdev); void rtw_core_stop(struct rtw_dev *rtwdev); int rtw_chip_info_setup(struct rtw_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index a7a6ebfaa203..a0991d3f15c0 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -2,7 +2,6 @@ /* Copyright(c) 2018-2019 Realtek Corporation */ -#include <linux/dmi.h> #include <linux/module.h> #include <linux/pci.h> #include "main.h" @@ -612,6 +611,9 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev) bool tx_empty = true; u8 queue; + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) + goto enter_deep_ps; + lockdep_assert_held(&rtwpci->irq_lock); /* Deep PS state is not allowed to TX-DMA */ @@ -637,7 +639,7 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev) "TX path not empty, cannot enter deep power save state\n"); return; } - +enter_deep_ps: set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags); rtw_power_mode_change(rtwdev, true); } @@ -808,7 +810,8 @@ static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue) bd_idx = rtw_pci_tx_queue_idx_addr[queue]; spin_lock_bh(&rtwpci->irq_lock); - rtw_pci_deep_ps_leave(rtwdev); + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) + rtw_pci_deep_ps_leave(rtwdev); rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK); spin_unlock_bh(&rtwpci->irq_lock); } @@ -1409,7 +1412,11 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter) * throughput. 
This is probably because the ASPM behavior slightly * varies from different SOC. */ - if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) + if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)) + return; + + if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) || + (!enter && atomic_inc_return(&rtwpci->link_usage) == 1)) rtw_pci_aspm_set(rtwdev, enter); } @@ -1658,6 +1665,9 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) priv); int work_done = 0; + if (rtwpci->rx_no_aspm) + rtw_pci_link_ps(rtwdev, false); + while (work_done < budget) { u32 work_done_once; @@ -1681,6 +1691,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci)) napi_schedule(napi); } + if (rtwpci->rx_no_aspm) + rtw_pci_link_ps(rtwdev, true); return work_done; } @@ -1702,50 +1714,13 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) netif_napi_del(&rtwpci->napi); } -enum rtw88_quirk_dis_pci_caps { - QUIRK_DIS_PCI_CAP_MSI, - QUIRK_DIS_PCI_CAP_ASPM, -}; - -static int disable_pci_caps(const struct dmi_system_id *dmi) -{ - uintptr_t dis_caps = (uintptr_t)dmi->driver_data; - - if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI)) - rtw_disable_msi = true; - if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM)) - rtw_pci_disable_aspm = true; - - return 1; -} - -static const struct dmi_system_id rtw88_pci_quirks[] = { - { - .callback = disable_pci_caps, - .ident = "Protempo Ltd L116HTN6SPW", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"), - DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"), - }, - .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), - }, - { - .callback = disable_pci_caps, - .ident = "HP HP Pavilion Laptop 14-ce0xxx", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Laptop 14-ce0xxx"), - }, - .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), - }, - {} -}; - int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct pci_dev *bridge = pci_upstream_bridge(pdev); struct ieee80211_hw *hw; struct rtw_dev *rtwdev; + struct rtw_pci *rtwpci; int drv_data_size; int ret; @@ -1763,6 +1738,9 @@ int rtw_pci_probe(struct pci_dev *pdev, rtwdev->hci.ops = &rtw_pci_ops; rtwdev->hci.type = RTW_HCI_TYPE_PCIE; + rtwpci = (struct rtw_pci *)rtwdev->priv; + atomic_set(&rtwpci->link_usage, 1); + ret = rtw_core_init(rtwdev); if (ret) goto err_release_hw; @@ -1791,7 +1769,10 @@ int rtw_pci_probe(struct pci_dev *pdev, goto err_destroy_pci; } - dmi_check_system(rtw88_pci_quirks); + /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */ + if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL) + rtwpci->rx_no_aspm = true; + rtw_pci_phy_cfg(rtwdev); ret = rtw_register_hw(rtwdev, hw); diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index 66f78eb7757c..0c37efd8c66f 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -223,6 +223,8 @@ struct rtw_pci { struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM]; struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM]; u16 link_ctrl; + atomic_t link_usage; + bool rx_no_aspm; DECLARE_BITMAP(flags, NUM_OF_RTW_PCI_FLAGS); void __iomem *mmap; diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index bfddfcbe63f5..e505d17f107e 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -10,6 +10,7 @@ #include "phy.h" #include "debug.h" #include "regd.h" +#include 
"sar.h" struct phy_cfg_pair { u32 addr; @@ -2004,6 +2005,25 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev, return tx_power; } +/* return RTW_RATE_SECTION_MAX to indicate rate is invalid */ +static u8 rtw_phy_rate_to_rate_section(u8 rate) +{ + if (rate >= DESC_RATE1M && rate <= DESC_RATE11M) + return RTW_RATE_SECTION_CCK; + else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M) + return RTW_RATE_SECTION_OFDM; + else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7) + return RTW_RATE_SECTION_HT_1S; + else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) + return RTW_RATE_SECTION_HT_2S; + else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9) + return RTW_RATE_SECTION_VHT_1S; + else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9) + return RTW_RATE_SECTION_VHT_2S; + else + return RTW_RATE_SECTION_MAX; +} + static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band, enum rtw_bandwidth bw, u8 rf_path, u8 rate, u8 channel, u8 regd) @@ -2011,7 +2031,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band, struct rtw_hal *hal = &rtwdev->hal; u8 *cch_by_bw = hal->cch_by_bw; s8 power_limit = (s8)rtwdev->chip->max_power_index; - u8 rs; + u8 rs = rtw_phy_rate_to_rate_section(rate); int ch_idx; u8 cur_bw, cur_ch; s8 cur_lmt; @@ -2019,19 +2039,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band, if (regd > RTW_REGD_WW) return power_limit; - if (rate >= DESC_RATE1M && rate <= DESC_RATE11M) - rs = RTW_RATE_SECTION_CCK; - else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M) - rs = RTW_RATE_SECTION_OFDM; - else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7) - rs = RTW_RATE_SECTION_HT_1S; - else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) - rs = RTW_RATE_SECTION_HT_2S; - else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9) - rs = RTW_RATE_SECTION_VHT_1S; - else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9) - rs = RTW_RATE_SECTION_VHT_2S; - else + if (rs == RTW_RATE_SECTION_MAX) goto err; /* only 20M BW with cck and ofdm */ @@ -2065,6 +2073,27 @@ err: return (s8)rtwdev->chip->max_power_index; } +static s8 rtw_phy_get_tx_power_sar(struct rtw_dev *rtwdev, u8 sar_band, + u8 rf_path, u8 rate) +{ + u8 rs = rtw_phy_rate_to_rate_section(rate); + struct rtw_sar_arg arg = { + .sar_band = sar_band, + .path = rf_path, + .rs = rs, + }; + + if (rs == RTW_RATE_SECTION_MAX) + goto err; + + return rtw_query_sar(rtwdev, &arg); + +err: + WARN(1, "invalid arguments, sar_band=%d, path=%d, rate=%d\n", + sar_band, rf_path, rate); + return (s8)rtwdev->chip->max_power_index; +} + void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, u8 ch, u8 regd, struct rtw_power_params *pwr_param) { @@ -2076,6 +2105,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, s8 *offset = &pwr_param->pwr_offset; s8 *limit = &pwr_param->pwr_limit; s8 *remnant = &pwr_param->pwr_remnant; + s8 *sar = &pwr_param->pwr_sar; pwr_idx = &rtwdev->efuse.txpwr_idx_table[path]; group = rtw_get_channel_group(ch, rate); @@ -2099,6 +2129,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, rate, ch, regd); *remnant = (rate <= DESC_RATE11M ? 
dm_info->txagc_remnant_cck : dm_info->txagc_remnant_ofdm); + *sar = rtw_phy_get_tx_power_sar(rtwdev, hal->sar_band, path, rate); } u8 @@ -2113,7 +2144,9 @@ rtw_phy_get_tx_power_index(struct rtw_dev *rtwdev, u8 rf_path, u8 rate, channel, regd, &pwr_param); tx_power = pwr_param.pwr_base; - offset = min_t(s8, pwr_param.pwr_offset, pwr_param.pwr_limit); + offset = min3(pwr_param.pwr_offset, + pwr_param.pwr_limit, + pwr_param.pwr_sar); if (rtwdev->chip->en_dis_dpd) offset += rtw_phy_get_dis_dpd_by_rate_diff(rtwdev, rate); diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index 02d1ec47ffb1..b6c5ae60a462 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -148,6 +148,7 @@ struct rtw_power_params { s8 pwr_offset; s8 pwr_limit; s8 pwr_remnant; + s8 pwr_sar; }; void diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c index 3f0ac33156d6..bfa64c038f5f 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.c +++ b/drivers/net/wireless/realtek/rtw88/ps.c @@ -83,6 +83,9 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter) /* Each request requires an ack from firmware */ request |= POWER_MODE_ACK; + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) + request |= POWER_TX_WAKE; + rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request); /* Check firmware get the power request and ack via cpwm register */ diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h index 7819391c8663..c194386f6db5 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.h +++ b/drivers/net/wireless/realtek/rtw88/ps.h @@ -9,6 +9,7 @@ #define POWER_MODE_ACK BIT(6) #define POWER_MODE_PG BIT(4) +#define POWER_TX_WAKE BIT(1) #define POWER_MODE_LCLK BIT(0) #define LEAVE_LPS_TRY_CNT 5 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 80a6f4da6acd..db078df63f85 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -223,7 +223,8 @@ static int rtw8821c_mac_init(struct rtw_dev *rtwdev) rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1); rtw_write8(rtwdev, REG_ACKTO_CCK, 0x40); rtw_write8_set(rtwdev, REG_WMAC_TRXPTCL_CTL_H, BIT(1)); - rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL, BIT(6)); + rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL, + BIT_DIS_CHK_VHTSIGB_CRC); rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2); rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index 112faa60f653..d9fbddd7b0f3 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -131,7 +131,7 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index c409c8c29ec8..dd4fbb82750d 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -205,7 +205,7 @@ static void rtw8822b_phy_set_param(struct rtw_dev
*rtwdev) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ @@ -262,6 +262,8 @@ static int rtw8822b_mac_init(struct rtw_dev *rtwdev) rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1); rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2); rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1); + rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL, + BIT_DIS_CHK_VHTSIGB_CRC); return 0; } diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 46b881e8e4fe..35c46e5209de 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -1962,7 +1962,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_MAC_INT_MIG_CFG 0x33330000 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ @@ -2102,6 +2102,8 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev) BIT_RXPSF_CONT_ERRCHKEN); value16 = BIT_SET_RXPSF_ERRTHR(value16, 0x07); rtw_write16(rtwdev, REG_RXPSF_CTRL, value16); + rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL, + BIT_DIS_CHK_VHTSIGB_CRC); /* Interrupt migration configuration */ rtw_write32(rtwdev, REG_INT_MIG, WLAN_MAC_INT_MIG_CFG); @@ -2533,6 +2535,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, s8 rx_power[RTW_RF_PATH_MAX]; s8 min_rx_power = -120; u8 rssi; + u8 channel; int path; rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status); @@ -2553,6 +2556,11 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, rx_power[RF_PATH_A] -= 110; rx_power[RF_PATH_B] -= 110; + channel = GET_PHY_STAT_P0_CHANNEL(phy_status); + if (channel == 0) + channel = rtwdev->hal.current_channel; + rtw_set_rx_freq_band(pkt_stat, channel); + pkt_stat->rx_power[RF_PATH_A] = rx_power[RF_PATH_A]; pkt_stat->rx_power[RF_PATH_B] = rx_power[RF_PATH_B]; @@ -2578,6 +2586,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, u8 evm_dbm = 0; u8 rssi; int path; + u8 channel; if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0) rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status); @@ -2591,6 +2600,9 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, else bw = RTW_CHANNEL_WIDTH_20; + channel = GET_PHY_STAT_P1_CHANNEL(phy_status); + rtw_set_rx_freq_band(pkt_stat, channel); + pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110; pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110; pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h index 3df627419d81..8201955e1f21 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h @@ -137,6 +137,8 @@ const struct rtw_table name ## _tbl = { \ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0)) #define GET_PHY_STAT_P0_GAIN_A(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(21, 16)) +#define GET_PHY_STAT_P0_CHANNEL(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(23, 16)) 
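GET_PHY_STAT_P0_CHANNEL just above follows the accessor shape used throughout these headers: load one little-endian dword out of the phy status blob, then extract a GENMASK field. A sketch of how the 8822c RX path consumes it, including the fall-back to the currently tuned channel when the field reads zero, mirroring the query_phy_status_page0() hunk above (kernel context assumed; parse_rx_channel() is an illustrative name):

```c
#include <linux/bitfield.h>	/* le32_get_bits() */
#include <linux/bits.h>		/* GENMASK() */
#include <linux/types.h>

/* Page-0 phy status, dword 1, bits 23:16: channel the frame was
 * received on (same layout as the macro added in the diff above).
 */
#define GET_PHY_STAT_P0_CHANNEL(phy_stat) \
	le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(23, 16))

/* Sketch of the query_phy_status_page0() logic: a zero channel field
 * means the hardware did not fill it in, so report the channel the
 * radio is currently tuned to (hal.current_channel in the driver).
 */
static u8 parse_rx_channel(u8 *phy_status, u8 current_channel)
{
	u8 channel = GET_PHY_STAT_P0_CHANNEL(phy_status);

	return channel ? channel : current_channel;
}
```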
#define GET_PHY_STAT_P0_GAIN_B(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(29, 24)) @@ -149,6 +151,8 @@ const struct rtw_table name ## _tbl = { \ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8)) #define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12)) +#define GET_PHY_STAT_P1_CHANNEL(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(23, 16)) #define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0)) #define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \ diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c index 7087e385a9b3..d2d607e22198 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.c +++ b/drivers/net/wireless/realtek/rtw88/rx.c @@ -6,6 +6,7 @@ #include "rx.h" #include "ps.h" #include "debug.h" +#include "fw.h" void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct sk_buff *skb) @@ -138,6 +139,13 @@ static void rtw_rx_addr_match(struct rtw_dev *rtwdev, rtw_iterate_vifs_atomic(rtwdev, rtw_rx_addr_match_iter, &data); } +static void rtw_set_rx_freq_by_pktstat(struct rtw_rx_pkt_stat *pkt_stat, + struct ieee80211_rx_status *rx_status) +{ + rx_status->freq = pkt_stat->freq; + rx_status->band = pkt_stat->band; +} + void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev, struct rtw_rx_pkt_stat *pkt_stat, struct ieee80211_hdr *hdr, @@ -150,6 +158,8 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev, memset(rx_status, 0, sizeof(*rx_status)); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status); if (pkt_stat->crc_err) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (pkt_stat->decrypted) diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c new file mode 100644 index 000000000000..3383726c4d90 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/sar.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2021 Realtek Corporation + */ + +#include "sar.h" +#include "phy.h" +#include "debug.h" + +s8 rtw_query_sar(struct rtw_dev *rtwdev, const struct rtw_sar_arg *arg) +{ + const struct rtw_hal *hal = &rtwdev->hal; + const struct rtw_sar *sar = &hal->sar; + + switch (sar->src) { + default: + rtw_warn(rtwdev, "unknown SAR source: %d\n", sar->src); + fallthrough; + case RTW_SAR_SOURCE_NONE: + return (s8)rtwdev->chip->max_power_index; + case RTW_SAR_SOURCE_COMMON: + return sar->cfg[arg->path][arg->rs].common[arg->sar_band]; + } +} + +static int rtw_apply_sar(struct rtw_dev *rtwdev, const struct rtw_sar *new) +{ + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_sar *sar = &hal->sar; + + if (sar->src != RTW_SAR_SOURCE_NONE && new->src != sar->src) { + rtw_warn(rtwdev, "SAR source: %d is in use\n", sar->src); + return -EBUSY; + } + + *sar = *new; + rtw_phy_set_tx_power_level(rtwdev, hal->current_channel); + + return 0; +} + +static s8 rtw_sar_to_phy(struct rtw_dev *rtwdev, u8 fct, s32 sar, + const struct rtw_sar_arg *arg) +{ + struct rtw_hal *hal = &rtwdev->hal; + u8 txgi = rtwdev->chip->txgi_factor; + u8 max = rtwdev->chip->max_power_index; + s32 tmp; + s8 base; + + tmp = fct > txgi ? sar >> (fct - txgi) : sar << (txgi - fct); + base = arg->sar_band == RTW_SAR_BAND_0 ? 
+ hal->tx_pwr_by_rate_base_2g[arg->path][arg->rs] : + hal->tx_pwr_by_rate_base_5g[arg->path][arg->rs]; + + return (s8)clamp_t(s32, tmp, -max - 1, max) - base; +} + +static const struct cfg80211_sar_freq_ranges rtw_common_sar_freq_ranges[] = { + [RTW_SAR_BAND_0] = { .start_freq = 2412, .end_freq = 2484, }, + [RTW_SAR_BAND_1] = { .start_freq = 5180, .end_freq = 5320, }, + [RTW_SAR_BAND_3] = { .start_freq = 5500, .end_freq = 5720, }, + [RTW_SAR_BAND_4] = { .start_freq = 5745, .end_freq = 5825, }, +}; + +static_assert(ARRAY_SIZE(rtw_common_sar_freq_ranges) == RTW_SAR_BAND_NR); + +const struct cfg80211_sar_capa rtw_sar_capa = { + .type = NL80211_SAR_TYPE_POWER, + .num_freq_ranges = RTW_SAR_BAND_NR, + .freq_ranges = rtw_common_sar_freq_ranges, +}; + +int rtw_set_sar_specs(struct rtw_dev *rtwdev, + const struct cfg80211_sar_specs *sar) +{ + struct rtw_sar_arg arg = {0}; + struct rtw_sar new = {0}; + u32 idx, i, j, k; + s32 power; + s8 val; + + if (sar->type != NL80211_SAR_TYPE_POWER) + return -EINVAL; + + memset(&new, rtwdev->chip->max_power_index, sizeof(new)); + new.src = RTW_SAR_SOURCE_COMMON; + + for (i = 0; i < sar->num_sub_specs; i++) { + idx = sar->sub_specs[i].freq_range_index; + if (idx >= RTW_SAR_BAND_NR) + return -EINVAL; + + power = sar->sub_specs[i].power; + rtw_info(rtwdev, "On freq %u to %u, set SAR %d in 1/%lu dBm\n", + rtw_common_sar_freq_ranges[idx].start_freq, + rtw_common_sar_freq_ranges[idx].end_freq, + power, BIT(RTW_COMMON_SAR_FCT)); + + for (j = 0; j < RTW_RF_PATH_MAX; j++) { + for (k = 0; k < RTW_RATE_SECTION_MAX; k++) { + arg = (struct rtw_sar_arg){ + .sar_band = idx, + .path = j, + .rs = k, + }; + val = rtw_sar_to_phy(rtwdev, RTW_COMMON_SAR_FCT, + power, &arg); + new.cfg[j][k].common[idx] = val; + } + } + } + + return rtw_apply_sar(rtwdev, &new); +} diff --git a/drivers/net/wireless/realtek/rtw88/sar.h b/drivers/net/wireless/realtek/rtw88/sar.h new file mode 100644 index 000000000000..e01e7bb790b7 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/sar.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2021 Realtek Corporation + */ + +#include "main.h" + +/* NL80211_SAR_TYPE_POWER means unit is in 0.25 dBm, + * where 0.25 = 1/4 = 2^(-2), so make factor 2. 
+ */ +#define RTW_COMMON_SAR_FCT 2 + +struct rtw_sar_arg { + u8 sar_band; + u8 path; + u8 rs; +}; + +extern const struct cfg80211_sar_capa rtw_sar_capa; + +s8 rtw_query_sar(struct rtw_dev *rtwdev, const struct rtw_sar_arg *arg); +int rtw_set_sar_specs(struct rtw_dev *rtwdev, + const struct cfg80211_sar_specs *sar); diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index 3a101aa139ed..efcc1b0371a8 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -233,17 +233,34 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src) spin_unlock_irqrestore(&tx_report->q_lock, flags); } +static u8 rtw_get_mgmt_rate(struct rtw_dev *rtwdev, struct sk_buff *skb, + u8 lowest_rate, bool ignore_rate) +{ + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = tx_info->control.vif; + bool force_lowest = test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); + + if (!vif || !vif->bss_conf.basic_rates || ignore_rate || force_lowest) + return lowest_rate; + + return __ffs(vif->bss_conf.basic_rates) + lowest_rate; +} + static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, - struct sk_buff *skb) + struct sk_buff *skb, + bool ignore_rate) { if (rtwdev->hal.current_band_type == RTW_BAND_2G) { pkt_info->rate_id = RTW_RATEID_B_20M; - pkt_info->rate = DESC_RATE1M; + pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE1M, + ignore_rate); } else { pkt_info->rate_id = RTW_RATEID_G; - pkt_info->rate = DESC_RATE6M; + pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE6M, + ignore_rate); } + pkt_info->use_rate = true; pkt_info->dis_rate_fallback = true; } @@ -280,7 +297,7 @@ static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct sk_buff *skb) { - rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, false); pkt_info->dis_qselseq = true; pkt_info->en_hwseq = true; pkt_info->hw_ssn_sel = 0; @@ -295,7 +312,9 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hw *hw = rtwdev->hw; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct rtw_sta_info *si; + u8 fix_rate; u16 seq; u8 ampdu_factor = 0; u8 ampdu_density = 0; @@ -347,6 +366,13 @@ out: pkt_info->bw = bw; pkt_info->stbc = stbc; pkt_info->ldpc = ldpc; + + fix_rate = dm_info->fix_rate; + if (fix_rate < DESC_RATE_MAX) { + pkt_info->rate = fix_rate; + pkt_info->dis_rate_fallback = true; + pkt_info->use_rate = true; + } } void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, @@ -404,7 +430,7 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, if (type != RSVD_BEACON && type != RSVD_DUMMY) pkt_info->qsel = TX_DESC_QSEL_MGMT; - rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, true); bmc = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index ad7a8155dbed..bd34e4bbe107 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -219,6 +219,7 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, struct ieee80211_key_conf *key, struct rtw89_sec_cam_entry *sec_cam) { + struct rtw89_sta *rtwsta = 
sta_to_rtwsta_safe(sta); struct rtw89_vif *rtwvif; struct rtw89_addr_cam_entry *addr_cam; u8 key_idx = 0; @@ -243,7 +244,7 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx; addr_cam->sec_entries[key_idx] = sec_cam; set_bit(key_idx, addr_cam->sec_cam_map); - ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); if (ret) { rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n", ret); @@ -371,6 +372,7 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev, struct ieee80211_key_conf *key, bool inform_fw) { + struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); struct rtw89_cam_info *cam_info = &rtwdev->cam_info; struct rtw89_vif *rtwvif; struct rtw89_addr_cam_entry *addr_cam; @@ -394,7 +396,7 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev, clear_bit(key_idx, addr_cam->sec_cam_map); addr_cam->sec_entries[key_idx] = NULL; if (inform_fw) { - ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); if (ret) rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret); } @@ -536,12 +538,8 @@ static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev, void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { - struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam; struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; - if (vif->type == NL80211_IFTYPE_STATION) - ether_addr_copy(addr_cam->tma, rtwvif->bssid); ether_addr_copy(bssid_cam->bssid, rtwvif->bssid); } @@ -593,7 +591,7 @@ int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev, return 0; } -static u8 rtw89_cam_addr_hash(u8 start, u8 *addr) +static u8 rtw89_cam_addr_hash(u8 start, const u8 *addr) { u8 hash = 0; u8 i; @@ -606,15 +604,18 @@ static u8 rtw89_cam_addr_hash(u8 start, u8 *addr) void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta, + const u8 *scan_mac_addr, u8 *cmd) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - struct ieee80211_sta *sta; - struct rtw89_sta *rtwsta; struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam; + struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta); + const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif->mac_addr; u8 sma_hash, tma_hash, addr_msk_start; u8 sma_start = 0; u8 tma_start = 0; + u8 *tma = sta ? 
sta->addr : rtwvif->bssid; if (addr_cam->addr_mask != 0) { addr_msk_start = __ffs(addr_cam->addr_mask); @@ -623,8 +624,8 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, else if (addr_cam->mask_sel == RTW89_TMA) tma_start = addr_msk_start; } - sma_hash = rtw89_cam_addr_hash(sma_start, rtwvif->mac_addr); - tma_hash = rtw89_cam_addr_hash(tma_start, addr_cam->tma); + sma_hash = rtw89_cam_addr_hash(sma_start, sma); + tma_hash = rtw89_cam_addr_hash(tma_start, tma); FWCMD_SET_ADDR_IDX(cmd, addr_cam->addr_cam_idx); FWCMD_SET_ADDR_OFFSET(cmd, addr_cam->offset); @@ -642,19 +643,19 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, addr_cam->bssid_cam_idx); - FWCMD_SET_ADDR_SMA0(cmd, rtwvif->mac_addr[0]); - FWCMD_SET_ADDR_SMA1(cmd, rtwvif->mac_addr[1]); - FWCMD_SET_ADDR_SMA2(cmd, rtwvif->mac_addr[2]); - FWCMD_SET_ADDR_SMA3(cmd, rtwvif->mac_addr[3]); - FWCMD_SET_ADDR_SMA4(cmd, rtwvif->mac_addr[4]); - FWCMD_SET_ADDR_SMA5(cmd, rtwvif->mac_addr[5]); + FWCMD_SET_ADDR_SMA0(cmd, sma[0]); + FWCMD_SET_ADDR_SMA1(cmd, sma[1]); + FWCMD_SET_ADDR_SMA2(cmd, sma[2]); + FWCMD_SET_ADDR_SMA3(cmd, sma[3]); + FWCMD_SET_ADDR_SMA4(cmd, sma[4]); + FWCMD_SET_ADDR_SMA5(cmd, sma[5]); - FWCMD_SET_ADDR_TMA0(cmd, addr_cam->tma[0]); - FWCMD_SET_ADDR_TMA1(cmd, addr_cam->tma[1]); - FWCMD_SET_ADDR_TMA2(cmd, addr_cam->tma[2]); - FWCMD_SET_ADDR_TMA3(cmd, addr_cam->tma[3]); - FWCMD_SET_ADDR_TMA4(cmd, addr_cam->tma[4]); - FWCMD_SET_ADDR_TMA5(cmd, addr_cam->tma[5]); + FWCMD_SET_ADDR_TMA0(cmd, tma[0]); + FWCMD_SET_ADDR_TMA1(cmd, tma[1]); + FWCMD_SET_ADDR_TMA2(cmd, tma[2]); + FWCMD_SET_ADDR_TMA3(cmd, tma[3]); + FWCMD_SET_ADDR_TMA4(cmd, tma[4]); + FWCMD_SET_ADDR_TMA5(cmd, tma[5]); FWCMD_SET_ADDR_PORT_INT(cmd, rtwvif->port); FWCMD_SET_ADDR_TSF_SYNC(cmd, rtwvif->port); @@ -662,15 +663,11 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, FWCMD_SET_ADDR_LSIG_TXOP(cmd, rtwvif->lsig_txop); FWCMD_SET_ADDR_TGT_IND(cmd, rtwvif->tgt_ind); FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif->frm_tgt_ind); - - if (vif->type == NL80211_IFTYPE_STATION) { - sta = rtwvif->mgd.ap; - if (sta) { - rtwsta = (struct rtw89_sta *)sta->drv_priv; - FWCMD_SET_ADDR_MACID(cmd, rtwsta->mac_id); - FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff); - } - } + FWCMD_SET_ADDR_MACID(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id); + if (rtwvif->net_type == RTW89_NET_TYPE_INFRA) + FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff); + else if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) + FWCMD_SET_ADDR_AID12(cmd, sta ? 
sta->aid & 0xfff : 0); FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif->wowlan_pattern); FWCMD_SET_ADDR_WOL_UC(cmd, rtwvif->wowlan_uc); FWCMD_SET_ADDR_WOL_MAGIC(cmd, rtwvif->wowlan_magic); diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h index 90a20a5375c6..33a3ad582b81 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.h +++ b/drivers/net/wireless/realtek/rtw89/cam.h @@ -9,145 +9,347 @@ #define RTW89_SEC_CAM_LEN 20 -#define FWCMD_SET_ADDR_IDX(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_OFFSET(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_LEN(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_VALID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(0)) -#define FWCMD_SET_ADDR_NET_TYPE(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(2, 1)) -#define FWCMD_SET_ADDR_BCN_HIT_COND(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(4, 3)) -#define FWCMD_SET_ADDR_HIT_RULE(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(6, 5)) -#define FWCMD_SET_ADDR_BB_SEL(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(7)) -#define FWCMD_SET_ADDR_ADDR_MASK(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(13, 8)) -#define FWCMD_SET_ADDR_MASK_SEL(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(15, 14)) -#define FWCMD_SET_ADDR_SMA_HASH(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_TMA_HASH(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(31, 24)) -#define FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 3, value, GENMASK(5, 0)) -#define FWCMD_SET_ADDR_SMA0(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_SMA1(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_SMA2(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_SMA3(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(31, 24)) -#define FWCMD_SET_ADDR_SMA4(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_SMA5(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_TMA0(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_TMA1(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(31, 24)) -#define FWCMD_SET_ADDR_TMA2(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_TMA3(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_TMA4(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_TMA5(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(31, 24)) -#define FWCMD_SET_ADDR_MACID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_PORT_INT(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(10, 8)) -#define FWCMD_SET_ADDR_TSF_SYNC(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(13, 11)) -#define 
FWCMD_SET_ADDR_TF_TRS(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(14)) -#define FWCMD_SET_ADDR_LSIG_TXOP(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(15)) -#define FWCMD_SET_ADDR_TGT_IND(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(26, 24)) -#define FWCMD_SET_ADDR_FRM_TGT_IND(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(29, 27)) -#define FWCMD_SET_ADDR_AID12(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 0)) -#define FWCMD_SET_ADDR_AID12_0(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_AID12_1(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 8)) -#define FWCMD_SET_ADDR_WOL_PATTERN(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(12)) -#define FWCMD_SET_ADDR_WOL_UC(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(13)) -#define FWCMD_SET_ADDR_WOL_MAGIC(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(14)) -#define FWCMD_SET_ADDR_WAPI(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(15)) -#define FWCMD_SET_ADDR_SEC_ENT_MODE(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(17, 16)) -#define FWCMD_SET_ADDR_SEC_ENT0_KEYID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(19, 18)) -#define FWCMD_SET_ADDR_SEC_ENT1_KEYID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(21, 20)) -#define FWCMD_SET_ADDR_SEC_ENT2_KEYID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(23, 22)) -#define FWCMD_SET_ADDR_SEC_ENT3_KEYID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(25, 24)) -#define FWCMD_SET_ADDR_SEC_ENT4_KEYID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(27, 26)) -#define FWCMD_SET_ADDR_SEC_ENT5_KEYID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(29, 28)) -#define FWCMD_SET_ADDR_SEC_ENT6_KEYID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(31, 30)) -#define FWCMD_SET_ADDR_SEC_ENT_VALID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_SEC_ENT0(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_SEC_ENT1(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_SEC_ENT2(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(31, 24)) -#define FWCMD_SET_ADDR_SEC_ENT3(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_SEC_ENT4(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_SEC_ENT5(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_SEC_ENT6(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(31, 24)) -#define FWCMD_SET_ADDR_BSSID_IDX(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_BSSID_OFFSET(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_BSSID_LEN(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_BSSID_VALID(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(0)) -#define FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, value) \ - 
le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1)) -#define FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8)) -#define FWCMD_SET_ADDR_BSSID_BSSID0(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_BSSID_BSSID1(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(31, 24)) -#define FWCMD_SET_ADDR_BSSID_BSSID2(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(7, 0)) -#define FWCMD_SET_ADDR_BSSID_BSSID3(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(15, 8)) -#define FWCMD_SET_ADDR_BSSID_BSSID4(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(23, 16)) -#define FWCMD_SET_ADDR_BSSID_BSSID5(cmd, value) \ - le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24)) +static inline void FWCMD_SET_ADDR_IDX(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_OFFSET(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_LEN(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_VALID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(0)); +} + +static inline void FWCMD_SET_ADDR_NET_TYPE(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(2, 1)); +} + +static inline void FWCMD_SET_ADDR_BCN_HIT_COND(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(4, 3)); +} + +static inline void FWCMD_SET_ADDR_HIT_RULE(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(6, 5)); +} + +static inline void FWCMD_SET_ADDR_BB_SEL(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(7)); +} + +static inline void FWCMD_SET_ADDR_ADDR_MASK(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(13, 8)); +} + +static inline void FWCMD_SET_ADDR_MASK_SEL(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(15, 14)); +} + +static inline void FWCMD_SET_ADDR_SMA_HASH(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_TMA_HASH(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(31, 24)); +} + +static inline void FWCMD_SET_ADDR_BSSID_CAM_IDX(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 3, value, GENMASK(5, 0)); +} + +static inline void FWCMD_SET_ADDR_SMA0(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_SMA1(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_SMA2(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_SMA3(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(31, 24)); +} + +static inline void FWCMD_SET_ADDR_SMA4(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_SMA5(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_TMA0(void *cmd, u32 value) 
+{ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_TMA1(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(31, 24)); +} + +static inline void FWCMD_SET_ADDR_TMA2(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_TMA3(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_TMA4(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_TMA5(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(31, 24)); +} + +static inline void FWCMD_SET_ADDR_MACID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_PORT_INT(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(10, 8)); +} + +static inline void FWCMD_SET_ADDR_TSF_SYNC(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(13, 11)); +} + +static inline void FWCMD_SET_ADDR_TF_TRS(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(14)); +} + +static inline void FWCMD_SET_ADDR_LSIG_TXOP(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(15)); +} + +static inline void FWCMD_SET_ADDR_TGT_IND(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(26, 24)); +} + +static inline void FWCMD_SET_ADDR_FRM_TGT_IND(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(29, 27)); +} + +static inline void FWCMD_SET_ADDR_AID12(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 0)); +} + +static inline void FWCMD_SET_ADDR_AID12_0(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_AID12_1(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 8)); +} + +static inline void FWCMD_SET_ADDR_WOL_PATTERN(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(12)); +} + +static inline void FWCMD_SET_ADDR_WOL_UC(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(13)); +} + +static inline void FWCMD_SET_ADDR_WOL_MAGIC(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(14)); +} + +static inline void FWCMD_SET_ADDR_WAPI(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(15)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT_MODE(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(17, 16)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT0_KEYID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(19, 18)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT1_KEYID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(21, 20)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT2_KEYID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(23, 22)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT3_KEYID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(25, 24)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT4_KEYID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(27, 26)); +} + +static 
inline void FWCMD_SET_ADDR_SEC_ENT5_KEYID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(29, 28)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT6_KEYID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(31, 30)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT_VALID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT0(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT1(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT2(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(31, 24)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT3(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT4(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT5(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_SEC_ENT6(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(31, 24)); +} + +static inline void FWCMD_SET_ADDR_BSSID_IDX(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_BSSID_OFFSET(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_BSSID_LEN(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_BSSID_VALID(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(0)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BB_SEL(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BSS_COLOR(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BSSID0(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BSSID1(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(31, 24)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BSSID2(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(7, 0)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BSSID3(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(15, 8)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BSSID4(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(23, 16)); +} + +static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value) +{ + le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24)); +} int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, - struct rtw89_vif *vif, u8 *cmd); + struct rtw89_vif *vif, + struct rtw89_sta *rtwsta, + const u8 *scan_mac_addr, u8 *cmd); int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev, struct rtw89_vif *vif, u8 
*cmd); int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index abe4b6549ab2..9f7d4f8d0c56 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -540,8 +540,31 @@ static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update); static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func, void *param, u16 len) { - rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len, - false, true); + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + int ret; + + if (!wl->status.map.init_ok) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by btc not init!!\n", __func__); + pfwinfo->cnt_h2c_fail++; + return; + } else if ((wl->status.map.rf_off_pre == 1 && wl->status.map.rf_off == 1) || + (wl->status.map.lps_pre == 1 && wl->status.map.lps == 1)) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by wl off!!\n", __func__); + pfwinfo->cnt_h2c_fail++; + return; + } + + pfwinfo->cnt_h2c++; + + ret = rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len, + false, true); + if (ret != 0) + pfwinfo->cnt_h2c_fail++; } static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type) @@ -1095,6 +1118,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, diff_t = pcysta->tavg_cycle[CXT_WL] - wl_slot_set; _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t); } + + _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]); + _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]); + _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles); } if (rpt_type == BTC_RPT_TYPE_CTRL) { @@ -1103,6 +1130,18 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, wl->ver_info.fw_coex = prpt->wl_fw_coex_ver; wl->ver_info.fw = prpt->wl_fw_ver; dm->wl_fw_cx_offload = !!(prpt->wl_fw_cx_offload); + + _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE, + pfwinfo->event[BTF_EVNT_RPT]); + + /* To avoid I/O if WL LPS or power-off */ + if (wl->status.map.lps != BTC_LPS_RF_OFF && !wl->status.map.rf_off) { + rtwdev->chip->ops->btc_update_bt_cnt(rtwdev); + _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0); + + btc->cx.cnt_bt[BTC_BCNT_POLUT] = + rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0); + } } if (rpt_type >= BTC_RPT_TYPE_BT_VER && @@ -1596,7 +1635,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev) _set_bt_rx_gain(rtwdev, para.bt_rx_gain); if (bt->enable.now == 0 || wl->status.map.rf_off == 1 || - wl->status.map.lps == 1) + wl->status.map.lps == BTC_LPS_RF_OFF) wl_stb_chg = 0; else wl_stb_chg = 1; @@ -4199,16 +4238,16 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta switch (rf_state) { case BTC_RFCTRL_WL_OFF: wl->status.map.rf_off = 1; - wl->status.map.lps = 0; + wl->status.map.lps = BTC_LPS_OFF; break; case BTC_RFCTRL_FW_CTRL: wl->status.map.rf_off = 0; - wl->status.map.lps = 1; + wl->status.map.lps = BTC_LPS_RF_OFF; break; case BTC_RFCTRL_WL_ON: default: wl->status.map.rf_off = 0; - wl->status.map.lps = 0; + wl->status.map.lps = BTC_LPS_OFF; break; } @@ -4494,6 +4533,8 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; u8 *buf = &skb->data[RTW89_C2H_HEADER_LEN]; + len -= RTW89_C2H_HEADER_LEN; + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): C2H BT len:%d
class:%d fun:%d\n", __func__, len, class, func); @@ -4512,14 +4553,12 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], handle C2H BT INFO with data %8ph\n", buf); btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE]++; - rtw89_leave_ps_mode(rtwdev); _update_bt_info(rtwdev, buf, len); break; case BTF_EVNT_BT_SCBD: rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], handle C2H BT SCBD with data %8ph\n", buf); btc->cx.cnt_bt[BTC_BCNT_SCBDUPDATE]++; - rtw89_leave_ps_mode(rtwdev); _update_bt_scbd(rtwdev, false); break; case BTF_EVNT_BT_PSD: @@ -4765,7 +4804,6 @@ static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m) static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) { - const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_cx *cx = &btc->cx; struct rtw89_btc_bt_info *bt = &cx->bt; @@ -4773,7 +4811,6 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_module *module = &btc->mdinfo; struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; u8 *afh = bt_linfo->afh_map; - u16 polt_cnt = 0; if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT)) return; @@ -4849,17 +4886,11 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) cx->cnt_bt[BTC_BCNT_INFOUPDATE], cx->cnt_bt[BTC_BCNT_INFOSAME]); - if (wl->status.map.lps || wl->status.map.rf_off) - return; - - chip->ops->btc_update_bt_cnt(rtwdev); - _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0); - seq_printf(m, " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)\n", "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX], cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX], - cx->cnt_bt[BTC_BCNT_LOPRI_TX], polt_cnt); + cx->cnt_bt[BTC_BCNT_LOPRI_TX], cx->cnt_bt[BTC_BCNT_POLUT]); } #define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e @@ -5227,8 +5258,6 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m) pcysta->bcn_cnt[CXBCN_BT_SLOT], pcysta->bcn_cnt[CXBCN_BT_OK]); - _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles); - for (i = 0; i < CXST_MAX; i++) { if (!pcysta->slot_cnt[i]) continue; @@ -5252,9 +5281,6 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m) } seq_puts(m, "\n"); - _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]); - _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]); - seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", "[cycle_time]", pcysta->tavg_cycle[CXT_WL], @@ -5606,9 +5632,6 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m) pfwinfo->event[BTF_EVNT_RPT], prptctrl->rpt_cnt, prptctrl->rpt_enable, dm->error.val); - _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE, - pfwinfo->event[BTF_EVNT_RPT]); - if (dm->error.map.wl_fw_hang) seq_puts(m, " (WL FW Hang!!)"); seq_puts(m, "\n"); diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h index 4b4565d15c9e..c3a722d259d7 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.h +++ b/drivers/net/wireless/realtek/rtw89/coex.h @@ -130,6 +130,12 @@ enum btc_rfctrl { BTC_RFCTRL_MAX }; +enum btc_lps_state { + BTC_LPS_OFF = 0, + BTC_LPS_RF_OFF = 1, + BTC_LPS_RF_ON = 2 +}; + void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev); void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev); void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode); diff --git a/drivers/net/wireless/realtek/rtw89/core.c 
b/drivers/net/wireless/realtek/rtw89/core.c index d02ec5a735cb..a0737eea9f81 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2019-2020 Realtek Corporation */ +#include <linux/ip.h> +#include <linux/udp.h> #include "coex.h" #include "core.h" @@ -143,20 +145,15 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef, { struct ieee80211_channel *channel = chandef->chan; enum nl80211_chan_width width = chandef->width; - u8 *cch_by_bw = chan_param->cch_by_bw; u32 primary_freq, center_freq; u8 center_chan; u8 bandwidth = RTW89_CHANNEL_WIDTH_20; u8 primary_chan_idx = 0; - u8 i; center_chan = channel->hw_value; primary_freq = channel->center_freq; center_freq = chandef->center_freq1; - /* assign the center channel used while 20M bw is selected */ - cch_by_bw[RTW89_CHANNEL_WIDTH_20] = channel->hw_value; - switch (width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: @@ -183,10 +180,6 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef, primary_chan_idx = RTW89_SC_20_UPMOST; center_chan -= 6; } - /* assign the center channel used - * while 40M bw is selected - */ - cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan + 4; } else { if (center_freq - primary_freq == 10) { primary_chan_idx = RTW89_SC_20_LOWER; @@ -195,10 +188,6 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef, primary_chan_idx = RTW89_SC_20_LOWEST; center_chan += 6; } - /* assign the center channel used - * while 40M bw is selected - */ - cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan - 4; } break; default: @@ -210,12 +199,6 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef, chan_param->primary_chan = channel->hw_value; chan_param->bandwidth = bandwidth; chan_param->pri_ch_idx = primary_chan_idx; - - /* assign the center channel used while current bw is selected */ - cch_by_bw[bandwidth] = center_chan; - - for (i = bandwidth + 1; i <= RTW89_MAX_CHANNEL_WIDTH; i++) - cch_by_bw[i] = 0; } void rtw89_set_channel(struct rtw89_dev *rtwdev) @@ -228,7 +211,6 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev) u8 center_chan, bandwidth; u8 band_type; bool band_changed; - u8 i; rtw89_get_channel_params(&hw->conf.chandef, &ch_param); if (WARN(ch_param.center_chan == 0, "Invalid channel\n")) @@ -242,6 +224,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev) hal->current_band_width = bandwidth; hal->current_channel = center_chan; + hal->prev_primary_channel = hal->current_primary_channel; hal->current_primary_channel = ch_param.primary_chan; hal->current_band_type = band_type; @@ -260,9 +243,6 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev) break; } - for (i = RTW89_CHANNEL_WIDTH_20; i <= RTW89_MAX_CHANNEL_WIDTH; i++) - hal->cch_by_bw[i] = ch_param.cch_by_bw[i]; - rtw89_chip_set_channel_prepare(rtwdev, &bak); chip->ops->set_channel(rtwdev, &ch_param); @@ -881,8 +861,11 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u8 *addr, { s16 cfo; + phy_ppdu->chan_idx = RTW89_GET_PHY_STS_IE01_CH_IDX(addr); + if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) + return; /* sign conversion for S(12,2) */ - cfo = sign_extend32(RTW89_GET_PHY_STS_IE0_CFO(addr), 11); + cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_CFO(addr), 11); rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu); } @@ -908,6 +891,7 @@ static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu) s8 *rssi = phy_ppdu->rssi; u8 *buf = 
phy_ppdu->buf; + phy_ppdu->ie = RTW89_GET_PHY_STS_IE_MAP(buf); phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf); rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf)); rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf)); @@ -936,8 +920,9 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev, u16 ie_len; u8 *pos, *end; - if (!phy_ppdu->to_self) - return 0; + /* mark invalid reports and bypass them */ + if (phy_ppdu->ie < RTW89_CCK_PKT) + return -EINVAL; pos = (u8 *)phy_ppdu->buf + PHY_STS_HDR_LEN; end = (u8 *)phy_ppdu->buf + phy_ppdu->len; @@ -1000,9 +985,7 @@ static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev, data_rate_mode = GET_DATA_RATE_MODE(data_rate); if (data_rate_mode == DATA_RATE_MODE_NON_HT) { rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate); - /* No 4 CCK rates for 5G */ - if (status->band == NL80211_BAND_5GHZ) - rate_idx -= 4; + /* rate_idx is still hardware value here */ } else if (data_rate_mode == DATA_RATE_MODE_HT) { rate_idx = GET_DATA_RATE_HT_IDX(data_rate); } else if (data_rate_mode == DATA_RATE_MODE_VHT) { @@ -1081,6 +1064,29 @@ static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev, rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data); } +static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev, + struct ieee80211_rx_status *status) +{ + u16 chan = rtwdev->hal.prev_primary_channel; + u8 band = chan <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + + if (status->band != NL80211_BAND_2GHZ && + status->encoding == RX_ENC_LEGACY && + status->rate_idx < RTW89_HW_RATE_OFDM6) { + status->freq = ieee80211_channel_to_frequency(chan, band); + status->band = band; + } +} + +static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status) +{ + if (rx_status->band == NL80211_BAND_2GHZ || + rx_status->encoding != RX_ENC_LEGACY) + return; + /* No 4 CCK rates for non-2G */ + rx_status->rate_idx -= 4; +} + static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct rtw89_rx_desc_info *desc_info, @@ -1099,6 +1105,8 @@ static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev, rx_status = IEEE80211_SKB_RXCB(skb_ppdu); if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status)) rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status); + rtw89_correct_cck_chan(rtwdev, rx_status); + rtw89_core_hw_to_sband_rate(rx_status); rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu); ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi); rtwdev->napi_budget_countdown--; @@ -1112,6 +1120,7 @@ static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false, .len = skb->len, .to_self = desc_info->addr1_match, + .rate = desc_info->data_rate, .mac_id = desc_info->mac_id}; int ret; @@ -1267,12 +1276,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, if (data_rate_mode == DATA_RATE_MODE_NON_HT) { rx_status->encoding = RX_ENC_LEGACY; rx_status->rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate); - /* No 4 CCK rates for 5G */ - if (rx_status->band == NL80211_BAND_5GHZ) - rx_status->rate_idx -= 4; - if (rtwdev->scanning) - rx_status->rate_idx = min_t(u8, rx_status->rate_idx, - ARRAY_SIZE(rtw89_bitrates) - 5); + /* convert rate_idx after we get the correct band */ } else if (data_rate_mode == DATA_RATE_MODE_HT) { rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = GET_DATA_RATE_HT_IDX(data_rate); @@ -1324,10 +1328,13 @@ static void 
rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev, { struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts; u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0; + struct ieee80211_rx_status *rx_status; struct sk_buff *skb_ppdu, *tmp; skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) { skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]); + rx_status = IEEE80211_SKB_RXCB(skb_ppdu); + rtw89_core_hw_to_sband_rate(rx_status); rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu); ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi); rtwdev->napi_budget_countdown--; @@ -1360,6 +1367,7 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev, BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) { skb_queue_tail(&ppdu_sts->rx_queue[band], skb); } else { + rtw89_core_hw_to_sband_rate(rx_status); rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb); ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi); rtwdev->napi_budget_countdown--; @@ -1825,7 +1833,8 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev, ewma_rssi_init(&rtwsta->avg_rssi); if (vif->type == NL80211_IFTYPE_STATION) { - rtwvif->mgd.ap = sta; + /* for station mode, assign the mac_id from itself */ + rtwsta->mac_id = rtwvif->mac_id; rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, BTC_ROLE_MSTS_STA_CONN_START); rtw89_chip_rfk_channel(rtwdev); @@ -1851,6 +1860,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; int ret; rtw89_mac_bf_monitor_calc(rtwdev, sta, true); @@ -1872,7 +1882,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev, } /* update cam aid mac_id net_type */ - rtw89_fw_h2c_cam(rtwdev, rtwvif); + rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); if (ret) { rtw89_warn(rtwdev, "failed to send h2c cam\n"); return ret; @@ -1897,10 +1907,6 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev, return ret; } - /* for station mode, assign the mac_id from itself */ - if (vif->type == NL80211_IFTYPE_STATION) - rtwsta->mac_id = rtwvif->mac_id; - ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0); if (ret) { rtw89_warn(rtwdev, "failed to send h2c join info\n"); @@ -1908,7 +1914,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev, } /* update cam aid mac_id net_type */ - rtw89_fw_h2c_cam(rtwdev, rtwvif); + rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); if (ret) { rtw89_warn(rtwdev, "failed to send h2c cam\n"); return ret; @@ -2115,7 +2121,8 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev, IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US; + u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US, + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); if (i == NL80211_IFTYPE_STATION) phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index c2885e4dd882..7c84556ec4ad 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -411,12 +411,13 @@ enum rtw89_regulation_type { RTW89_NA = 4, RTW89_IC = 5, RTW89_KCC = 6, - RTW89_NCC = 7, - RTW89_CHILE = 8, - RTW89_ACMA = 9, - RTW89_MEXICO = 10, + RTW89_ACMA = 7, + RTW89_NCC = 8, + RTW89_MEXICO = 
9, + RTW89_CHILE = 10, RTW89_UKRAINE = 11, RTW89_CN = 12, + RTW89_QATAR = 13, RTW89_REGD_NUM, }; @@ -472,6 +473,9 @@ struct rtw89_rx_phy_ppdu { u8 rssi_avg; s8 rssi[RF_PATH_MAX]; u8 mac_id; + u8 chan_idx; + u8 ie; + u16 rate; bool to_self; bool valid; }; @@ -543,7 +547,6 @@ enum rtw89_ps_mode { RTW89_PS_MODE_PWR_GATED = 3, }; -#define RTW89_MAX_CHANNEL_WIDTH RTW89_CHANNEL_WIDTH_80 #define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1) #define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1) #define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1) @@ -570,7 +573,6 @@ struct rtw89_channel_params { u8 primary_chan; u8 bandwidth; u8 pri_ch_idx; - u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1]; }; struct rtw89_channel_help_params { @@ -803,6 +805,7 @@ enum rtw89_btc_bt_state_cnt { BTC_BCNT_HIPRI_RX, BTC_BCNT_LOPRI_TX, BTC_BCNT_LOPRI_RX, + BTC_BCNT_POLUT, BTC_BCNT_RATECHG, BTC_BCNT_NUM }; @@ -1865,7 +1868,6 @@ struct rtw89_addr_cam_entry { u8 wapi : 1; u8 mask_sel : 2; u8 bssid_cam_idx: 6; - u8 tma[ETH_ALEN]; u8 sma[ETH_ALEN]; u8 sec_ent_mode; @@ -1934,14 +1936,6 @@ struct rtw89_vif { bool wowlan_magic; bool is_hesta; bool last_a_ctrl; - union { - struct { - struct ieee80211_sta *ap; - } mgd; - struct { - struct list_head sta_list; - } ap; - }; struct rtw89_addr_cam_entry addr_cam; struct rtw89_bssid_cam_entry bssid_cam; struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS]; @@ -2354,14 +2348,11 @@ struct rtw89_hal { u32 rx_fltr; u8 cv; u8 current_channel; + u8 prev_primary_channel; u8 current_primary_channel; enum rtw89_subband current_subband; u8 current_band_width; u8 current_band_type; - /* center channel for different available bandwidth, - * val of (bw > current_band_width) is invalid - */ - u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1]; u32 sw_amsdu_max_size; u32 antenna_tx; u32 antenna_rx; @@ -3127,6 +3118,16 @@ static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta) return container_of(p, struct ieee80211_sta, drv_priv); } +static inline struct ieee80211_sta *rtwsta_to_sta_safe(struct rtw89_sta *rtwsta) +{ + return rtwsta ? rtwsta_to_sta(rtwsta) : NULL; +} + +static inline struct rtw89_sta *sta_to_rtwsta_safe(struct ieee80211_sta *sta) +{ + return sta ? 
(struct rtw89_sta *)sta->drv_priv : NULL; +} + static inline void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev, struct rtw89_channel_help_params *p) diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index 29eb188c888c..22bd1d03e722 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -2,6 +2,8 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include <linux/vmalloc.h> + #include "coex.h" #include "debug.h" #include "fw.h" @@ -723,6 +725,7 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp, } static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = { + [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR, @@ -735,6 +738,10 @@ static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = { [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR, [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR, [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR, + [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR, + [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR, + [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR, + [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR, }; static void rtw89_debug_dump_mac_mem(struct seq_file *m, @@ -814,7 +821,7 @@ rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp, return -EINVAL; } - enable = set == 0 ? false : true; + enable = set != 0; switch (sel) { case 0: debugfs_priv->dbgpkg_en.ss_dbg = enable; @@ -2280,7 +2287,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) switch (status->encoding) { case RX_ENC_LEGACY: seq_printf(m, "Legacy %d", status->rate_idx + - (status->band == NL80211_BAND_5GHZ ? 4 : 0)); + (status->band != NL80211_BAND_2GHZ ? 
4 : 0)); break; case RX_ENC_HT: seq_printf(m, "HT MCS-%d%s", status->rate_idx, diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 212aaf577d3c..8a57b75b07c0 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, info->section_num = GET_FW_HDR_SEC_NUM(fw); info->hdr_len = RTW89_FW_HDR_SIZE + info->section_num * RTW89_FW_SECTION_HDR_SIZE; - SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN); bin = fw + info->hdr_len; @@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l } skb_put_data(skb, fw, len); + SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN); rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FWDL, H2C_FUNC_MAC_FWHDR_DL, len); @@ -523,7 +523,8 @@ void rtw89_unload_firmware(struct rtw89_dev *rtwdev) } #define H2C_CAM_LEN 60 -int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) { struct sk_buff *skb; @@ -533,7 +534,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) return -ENOMEM; } skb_put(skb, H2C_CAM_LEN); - rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, skb->data); + rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data); rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, @@ -780,7 +781,7 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, if (!ppe_th) { u8 pad; - pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK, + pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, sta->he_cap.he_cap_elem.phy_cap_info[9]); for (i = 0; i < RTW89_PPE_BW_NUM; i++) diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 7ee0d9323310..2d36dc27222f 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -156,861 +156,1125 @@ struct rtw89_h2creg_sch_tx_en { u16 rsvd:15; } __packed; -#define RTW89_SET_FWCMD_RA_IS_DIS(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0)) -#define RTW89_SET_FWCMD_RA_MODE(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(5, 1)) -#define RTW89_SET_FWCMD_RA_BW_CAP(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 6)) -#define RTW89_SET_FWCMD_RA_MACID(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8)) -#define RTW89_SET_FWCMD_RA_DCM(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(16)) -#define RTW89_SET_FWCMD_RA_ER(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(17)) -#define RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(19, 18)) -#define RTW89_SET_FWCMD_RA_UPD_ALL(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(20)) -#define RTW89_SET_FWCMD_RA_SGI(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(21)) -#define RTW89_SET_FWCMD_RA_LDPC(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(22)) -#define RTW89_SET_FWCMD_RA_STBC(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(23)) -#define RTW89_SET_FWCMD_RA_SS_NUM(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(26, 24)) 
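This fw.h hunk repeats the macro-to-inline conversion already applied to cam.h above: each field-setter becomes a type-checked static inline with identical behavior. A minimal sketch of the pattern, assuming only the <linux/bitfield.h> helpers (the example function name is illustrative, not part of the patch):

#include <linux/bitfield.h>

/* le32p_replace_bits() read-modify-writes only the masked field of the
 * little-endian dword in place, leaving all other bits untouched, so the
 * converted setter is a drop-in replacement for the old macro.
 */
static inline void example_set_ra_ss_num(void *cmd, u32 val)
{
	/* dword 0, bits 26:24: the RA command's SS_NUM field */
	le32p_replace_bits((__le32 *)cmd + 0x00, val, GENMASK(26, 24));
}

Unlike the object-like macros, the inline form gives the compiler real parameter types (void *cmd, u32 val) to check at every call site, without changing the generated stores.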
-#define RTW89_SET_FWCMD_RA_GILTF(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(29, 27)) -#define RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(30)) -#define RTW89_SET_FWCMD_RA_UPD_MASK(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(31)) -#define RTW89_SET_FWCMD_RA_MASK_0(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_RA_MASK_1(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(15, 8)) -#define RTW89_SET_FWCMD_RA_MASK_2(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(23, 16)) -#define RTW89_SET_FWCMD_RA_MASK_3(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 24)) -#define RTW89_SET_FWCMD_RA_MASK_4(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x02, val, BIT(31)) -#define RTW89_SET_FWCMD_RA_BAND_NUM(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(8)) -#define RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(9)) -#define RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10)) -#define RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16)) -#define RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(25, 24)) -#define RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(28, 26)) -#define RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 29)) - -#define RTW89_SET_FWCMD_SEC_IDX(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_SEC_OFFSET(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8)) -#define RTW89_SET_FWCMD_SEC_LEN(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(23, 16)) -#define RTW89_SET_FWCMD_SEC_TYPE(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(3, 0)) -#define RTW89_SET_FWCMD_SEC_EXT_KEY(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(4)) -#define RTW89_SET_FWCMD_SEC_SPP_MODE(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(5)) -#define RTW89_SET_FWCMD_SEC_KEY0(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(31, 0)) -#define RTW89_SET_FWCMD_SEC_KEY1(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 0)) -#define RTW89_SET_FWCMD_SEC_KEY2(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x04, val, GENMASK(31, 0)) -#define RTW89_SET_FWCMD_SEC_KEY3(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x05, val, GENMASK(31, 0)) - -#define RTW89_SET_EDCA_SEL(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(1, 0)) -#define RTW89_SET_EDCA_BAND(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(3)) -#define RTW89_SET_EDCA_WMM(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(4)) -#define RTW89_SET_EDCA_AC(cmd, val) \ - le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(6, 5)) -#define RTW89_SET_EDCA_PARAM(cmd, val) \ - le32p_replace_bits((__le32 
*)(cmd) + 0x01, val, GENMASK(31, 0)) +static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_RA_MODE(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(5, 1)); +} + +static inline void RTW89_SET_FWCMD_RA_BW_CAP(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 6)); +} + +static inline void RTW89_SET_FWCMD_RA_MACID(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8)); +} + +static inline void RTW89_SET_FWCMD_RA_DCM(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(16)); +} + +static inline void RTW89_SET_FWCMD_RA_ER(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(17)); +} + +static inline void RTW89_SET_FWCMD_RA_INIT_RATE_LV(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(19, 18)); +} + +static inline void RTW89_SET_FWCMD_RA_UPD_ALL(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(20)); +} + +static inline void RTW89_SET_FWCMD_RA_SGI(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(21)); +} + +static inline void RTW89_SET_FWCMD_RA_LDPC(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(22)); +} + +static inline void RTW89_SET_FWCMD_RA_STBC(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(23)); +} + +static inline void RTW89_SET_FWCMD_RA_SS_NUM(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(26, 24)); +} + +static inline void RTW89_SET_FWCMD_RA_GILTF(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(29, 27)); +} + +static inline void RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(30)); +} + +static inline void RTW89_SET_FWCMD_RA_UPD_MASK(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(31)); +} + +static inline void RTW89_SET_FWCMD_RA_MASK_0(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_RA_MASK_1(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(15, 8)); +} + +static inline void RTW89_SET_FWCMD_RA_MASK_2(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(23, 16)); +} + +static inline void RTW89_SET_FWCMD_RA_MASK_3(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 24)); +} + +static inline void RTW89_SET_FWCMD_RA_MASK_4(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x02, val, BIT(31)); +} + +static inline void RTW89_SET_FWCMD_RA_BAND_NUM(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(8)); +} + +static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(9)); +} + +static inline void RTW89_SET_FWCMD_RA_CR_TBL_SEL(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10)); +} + +static inline void 
RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16)); +} + +static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(25, 24)); +} + +static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(28, 26)); +} + +static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_BW(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 29)); +} + +static inline void RTW89_SET_FWCMD_SEC_IDX(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_SEC_OFFSET(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8)); +} + +static inline void RTW89_SET_FWCMD_SEC_LEN(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(23, 16)); +} + +static inline void RTW89_SET_FWCMD_SEC_TYPE(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(3, 0)); +} + +static inline void RTW89_SET_FWCMD_SEC_EXT_KEY(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(4)); +} + +static inline void RTW89_SET_FWCMD_SEC_SPP_MODE(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(5)); +} + +static inline void RTW89_SET_FWCMD_SEC_KEY0(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(31, 0)); +} + +static inline void RTW89_SET_FWCMD_SEC_KEY1(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 0)); +} + +static inline void RTW89_SET_FWCMD_SEC_KEY2(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x04, val, GENMASK(31, 0)); +} + +static inline void RTW89_SET_FWCMD_SEC_KEY3(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x05, val, GENMASK(31, 0)); +} + +static inline void RTW89_SET_EDCA_SEL(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(1, 0)); +} + +static inline void RTW89_SET_EDCA_BAND(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(3)); +} + +static inline void RTW89_SET_EDCA_WMM(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(4)); +} + +static inline void RTW89_SET_EDCA_AC(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(6, 5)); +} + +static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 0)); +} #define FW_EDCA_PARAM_TXOPLMT_MSK GENMASK(26, 16) #define FW_EDCA_PARAM_CWMAX_MSK GENMASK(15, 12) #define FW_EDCA_PARAM_CWMIN_MSK GENMASK(11, 8) #define FW_EDCA_PARAM_AIFS_MSK GENMASK(7, 0) #define GET_FWSECTION_HDR_SEC_SIZE(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(23, 0)) + le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(23, 0)) #define GET_FWSECTION_HDR_CHECKSUM(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(28)) + le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(28)) #define GET_FWSECTION_HDR_REDL(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(29)) + le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(29)) #define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr)), GENMASK(31, 0)) + le32_get_bits(*((const __le32 *)(fwhdr)), GENMASK(31, 0)) #define GET_FW_HDR_MAJOR_VERSION(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(7, 0)) + 
le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(7, 0)) #define GET_FW_HDR_MINOR_VERSION(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(15, 8)) #define GET_FW_HDR_SUBVERSION(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(23, 16)) + le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(23, 16)) #define GET_FW_HDR_SUBINDEX(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(31, 24)) + le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(31, 24)) #define GET_FW_HDR_MONTH(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(7, 0)) + le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(7, 0)) #define GET_FW_HDR_DATE(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(15, 8)) #define GET_FW_HDR_HOUR(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(23, 16)) + le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(23, 16)) #define GET_FW_HDR_MIN(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(31, 24)) + le32_get_bits(*((const __le32 *)(fwhdr) + 4), GENMASK(31, 24)) #define GET_FW_HDR_YEAR(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 5), GENMASK(31, 0)) + le32_get_bits(*((const __le32 *)(fwhdr) + 5), GENMASK(31, 0)) #define GET_FW_HDR_SEC_NUM(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)(fwhdr) + 6), GENMASK(15, 8)) #define GET_FW_HDR_CMD_VERSERION(fwhdr) \ - le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24)) -#define SET_FW_HDR_PART_SIZE(fwhdr, val) \ - le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0)) - -#define SET_CTRL_INFO_MACID(table, val) \ - le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)) -#define SET_CTRL_INFO_OPERATION(table, val) \ - le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7)) + le32_get_bits(*((const __le32 *)(fwhdr) + 7), GENMASK(31, 24)) +static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val) +{ + le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0)); +} + +static inline void SET_CTRL_INFO_MACID(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)); +} + +static inline void SET_CTRL_INFO_OPERATION(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7)); +} #define SET_CMC_TBL_MASK_DATARATE GENMASK(8, 0) -#define SET_CMC_TBL_DATARATE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(8, 0)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATARATE, \ - GENMASK(8, 0)); \ -} while (0) +static inline void SET_CMC_TBL_DATARATE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(8, 0)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATARATE, + GENMASK(8, 0)); +} #define SET_CMC_TBL_MASK_FORCE_TXOP BIT(0) -#define SET_CMC_TBL_FORCE_TXOP(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(9)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_FORCE_TXOP, \ - BIT(9)); \ -} while (0) +static inline void SET_CMC_TBL_FORCE_TXOP(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(9)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_FORCE_TXOP, + BIT(9)); +} #define SET_CMC_TBL_MASK_DATA_BW GENMASK(1, 0) -#define SET_CMC_TBL_DATA_BW(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(11, 10)); \ - le32p_replace_bits((__le32 *)(table) + 9, 
SET_CMC_TBL_MASK_DATA_BW, \ - GENMASK(11, 10)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_BW(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(11, 10)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_BW, + GENMASK(11, 10)); +} #define SET_CMC_TBL_MASK_DATA_GI_LTF GENMASK(2, 0) -#define SET_CMC_TBL_DATA_GI_LTF(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 12)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_GI_LTF, \ - GENMASK(14, 12)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_GI_LTF(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 12)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_GI_LTF, + GENMASK(14, 12)); +} #define SET_CMC_TBL_MASK_DARF_TC_INDEX BIT(0) -#define SET_CMC_TBL_DARF_TC_INDEX(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DARF_TC_INDEX, \ - BIT(15)); \ -} while (0) +static inline void SET_CMC_TBL_DARF_TC_INDEX(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DARF_TC_INDEX, + BIT(15)); +} #define SET_CMC_TBL_MASK_ARFR_CTRL GENMASK(3, 0) -#define SET_CMC_TBL_ARFR_CTRL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(19, 16)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ARFR_CTRL, \ - GENMASK(19, 16)); \ -} while (0) +static inline void SET_CMC_TBL_ARFR_CTRL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(19, 16)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ARFR_CTRL, + GENMASK(19, 16)); +} #define SET_CMC_TBL_MASK_ACQ_RPT_EN BIT(0) -#define SET_CMC_TBL_ACQ_RPT_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(20)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ACQ_RPT_EN, \ - BIT(20)); \ -} while (0) +static inline void SET_CMC_TBL_ACQ_RPT_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(20)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ACQ_RPT_EN, + BIT(20)); +} #define SET_CMC_TBL_MASK_MGQ_RPT_EN BIT(0) -#define SET_CMC_TBL_MGQ_RPT_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(21)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_MGQ_RPT_EN, \ - BIT(21)); \ -} while (0) +static inline void SET_CMC_TBL_MGQ_RPT_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(21)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_MGQ_RPT_EN, + BIT(21)); +} #define SET_CMC_TBL_MASK_ULQ_RPT_EN BIT(0) -#define SET_CMC_TBL_ULQ_RPT_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(22)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ULQ_RPT_EN, \ - BIT(22)); \ -} while (0) +static inline void SET_CMC_TBL_ULQ_RPT_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(22)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ULQ_RPT_EN, + BIT(22)); +} #define SET_CMC_TBL_MASK_TWTQ_RPT_EN BIT(0) -#define SET_CMC_TBL_TWTQ_RPT_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(23)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TWTQ_RPT_EN, \ - BIT(23)); \ -} while (0) +static inline void SET_CMC_TBL_TWTQ_RPT_EN(void *table, u32 
val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(23)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TWTQ_RPT_EN, + BIT(23)); +} #define SET_CMC_TBL_MASK_DISRTSFB BIT(0) -#define SET_CMC_TBL_DISRTSFB(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(25)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISRTSFB, \ - BIT(25)); \ -} while (0) +static inline void SET_CMC_TBL_DISRTSFB(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(25)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISRTSFB, + BIT(25)); +} #define SET_CMC_TBL_MASK_DISDATAFB BIT(0) -#define SET_CMC_TBL_DISDATAFB(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(26)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISDATAFB, \ - BIT(26)); \ -} while (0) +static inline void SET_CMC_TBL_DISDATAFB(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(26)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISDATAFB, + BIT(26)); +} #define SET_CMC_TBL_MASK_TRYRATE BIT(0) -#define SET_CMC_TBL_TRYRATE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(27)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TRYRATE, \ - BIT(27)); \ -} while (0) +static inline void SET_CMC_TBL_TRYRATE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(27)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TRYRATE, + BIT(27)); +} #define SET_CMC_TBL_MASK_AMPDU_DENSITY GENMASK(3, 0) -#define SET_CMC_TBL_AMPDU_DENSITY(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 28)); \ - le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_AMPDU_DENSITY, \ - GENMASK(31, 28)); \ -} while (0) +static inline void SET_CMC_TBL_AMPDU_DENSITY(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 28)); + le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_AMPDU_DENSITY, + GENMASK(31, 28)); +} #define SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE GENMASK(8, 0) -#define SET_CMC_TBL_DATA_RTY_LOWEST_RATE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(8, 0)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE, \ - GENMASK(8, 0)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_RTY_LOWEST_RATE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(8, 0)); + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE, + GENMASK(8, 0)); +} #define SET_CMC_TBL_MASK_AMPDU_TIME_SEL BIT(0) -#define SET_CMC_TBL_AMPDU_TIME_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, BIT(9)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_TIME_SEL, \ - BIT(9)); \ -} while (0) +static inline void SET_CMC_TBL_AMPDU_TIME_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(9)); + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_TIME_SEL, + BIT(9)); +} #define SET_CMC_TBL_MASK_AMPDU_LEN_SEL BIT(0) -#define SET_CMC_TBL_AMPDU_LEN_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, BIT(10)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_LEN_SEL, \ - BIT(10)); \ -} while (0) +static inline void SET_CMC_TBL_AMPDU_LEN_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(10)); + 
le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_LEN_SEL, + BIT(10)); +} #define SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL BIT(0) -#define SET_CMC_TBL_RTS_TXCNT_LMT_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, BIT(11)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL, \ - BIT(11)); \ -} while (0) +static inline void SET_CMC_TBL_RTS_TXCNT_LMT_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(11)); + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL, + BIT(11)); +} #define SET_CMC_TBL_MASK_RTS_TXCNT_LMT GENMASK(3, 0) -#define SET_CMC_TBL_RTS_TXCNT_LMT(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(15, 12)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT, \ - GENMASK(15, 12)); \ -} while (0) +static inline void SET_CMC_TBL_RTS_TXCNT_LMT(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(15, 12)); + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT, + GENMASK(15, 12)); +} #define SET_CMC_TBL_MASK_RTSRATE GENMASK(8, 0) -#define SET_CMC_TBL_RTSRATE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(24, 16)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTSRATE, \ - GENMASK(24, 16)); \ -} while (0) +static inline void SET_CMC_TBL_RTSRATE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(24, 16)); + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTSRATE, + GENMASK(24, 16)); +} #define SET_CMC_TBL_MASK_VCS_STBC BIT(0) -#define SET_CMC_TBL_VCS_STBC(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, BIT(27)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_VCS_STBC, \ - BIT(27)); \ -} while (0) +static inline void SET_CMC_TBL_VCS_STBC(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, BIT(27)); + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_VCS_STBC, + BIT(27)); +} #define SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE GENMASK(3, 0) -#define SET_CMC_TBL_RTS_RTY_LOWEST_RATE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 28)); \ - le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE, \ - GENMASK(31, 28)); \ -} while (0) +static inline void SET_CMC_TBL_RTS_RTY_LOWEST_RATE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 28)); + le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE, + GENMASK(31, 28)); +} #define SET_CMC_TBL_MASK_DATA_TX_CNT_LMT GENMASK(5, 0) -#define SET_CMC_TBL_DATA_TX_CNT_LMT(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(5, 0)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TX_CNT_LMT, \ - GENMASK(5, 0)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_TX_CNT_LMT(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(5, 0)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TX_CNT_LMT, + GENMASK(5, 0)); +} #define SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL BIT(0) -#define SET_CMC_TBL_DATA_TXCNT_LMT_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(6)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL, \ - BIT(6)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_TXCNT_LMT_SEL(void *table, u32 val) +{ 
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(6)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL, + BIT(6)); +} #define SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL BIT(0) -#define SET_CMC_TBL_MAX_AGG_NUM_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(7)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL, \ - BIT(7)); \ -} while (0) +static inline void SET_CMC_TBL_MAX_AGG_NUM_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(7)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL, + BIT(7)); +} #define SET_CMC_TBL_MASK_RTS_EN BIT(0) -#define SET_CMC_TBL_RTS_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(8)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_EN, \ - BIT(8)); \ -} while (0) +static inline void SET_CMC_TBL_RTS_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(8)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_EN, + BIT(8)); +} #define SET_CMC_TBL_MASK_CTS2SELF_EN BIT(0) -#define SET_CMC_TBL_CTS2SELF_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(9)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CTS2SELF_EN, \ - BIT(9)); \ -} while (0) +static inline void SET_CMC_TBL_CTS2SELF_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(9)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CTS2SELF_EN, + BIT(9)); +} #define SET_CMC_TBL_MASK_CCA_RTS GENMASK(1, 0) -#define SET_CMC_TBL_CCA_RTS(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 10)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CCA_RTS, \ - GENMASK(11, 10)); \ -} while (0) +static inline void SET_CMC_TBL_CCA_RTS(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 10)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CCA_RTS, + GENMASK(11, 10)); +} #define SET_CMC_TBL_MASK_HW_RTS_EN BIT(0) -#define SET_CMC_TBL_HW_RTS_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(12)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_HW_RTS_EN, \ - BIT(12)); \ -} while (0) +static inline void SET_CMC_TBL_HW_RTS_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(12)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_HW_RTS_EN, + BIT(12)); +} #define SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE GENMASK(1, 0) -#define SET_CMC_TBL_RTS_DROP_DATA_MODE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(14, 13)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE, \ - GENMASK(14, 13)); \ -} while (0) +static inline void SET_CMC_TBL_RTS_DROP_DATA_MODE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(14, 13)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE, + GENMASK(14, 13)); +} #define SET_CMC_TBL_MASK_AMPDU_MAX_LEN GENMASK(10, 0) -#define SET_CMC_TBL_AMPDU_MAX_LEN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 16)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_LEN, \ - GENMASK(26, 16)); \ -} while (0) +static inline void SET_CMC_TBL_AMPDU_MAX_LEN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 16)); + 
le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_LEN, + GENMASK(26, 16)); +} #define SET_CMC_TBL_MASK_UL_MU_DIS BIT(0) -#define SET_CMC_TBL_UL_MU_DIS(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_UL_MU_DIS, \ - BIT(27)); \ -} while (0) +static inline void SET_CMC_TBL_UL_MU_DIS(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_UL_MU_DIS, + BIT(27)); +} #define SET_CMC_TBL_MASK_AMPDU_MAX_TIME GENMASK(3, 0) -#define SET_CMC_TBL_AMPDU_MAX_TIME(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(31, 28)); \ - le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_TIME, \ - GENMASK(31, 28)); \ -} while (0) +static inline void SET_CMC_TBL_AMPDU_MAX_TIME(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(31, 28)); + le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_TIME, + GENMASK(31, 28)); +} #define SET_CMC_TBL_MASK_MAX_AGG_NUM GENMASK(7, 0) -#define SET_CMC_TBL_MAX_AGG_NUM(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(7, 0)); \ - le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_MAX_AGG_NUM, \ - GENMASK(7, 0)); \ -} while (0) +static inline void SET_CMC_TBL_MAX_AGG_NUM(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(7, 0)); + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_MAX_AGG_NUM, + GENMASK(7, 0)); +} #define SET_CMC_TBL_MASK_BA_BMAP GENMASK(1, 0) -#define SET_CMC_TBL_BA_BMAP(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(9, 8)); \ - le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BA_BMAP, \ - GENMASK(9, 8)); \ -} while (0) +static inline void SET_CMC_TBL_BA_BMAP(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(9, 8)); + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BA_BMAP, + GENMASK(9, 8)); +} #define SET_CMC_TBL_MASK_VO_LFTIME_SEL GENMASK(2, 0) -#define SET_CMC_TBL_VO_LFTIME_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(18, 16)); \ - le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VO_LFTIME_SEL, \ - GENMASK(18, 16)); \ -} while (0) +static inline void SET_CMC_TBL_VO_LFTIME_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(18, 16)); + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VO_LFTIME_SEL, + GENMASK(18, 16)); +} #define SET_CMC_TBL_MASK_VI_LFTIME_SEL GENMASK(2, 0) -#define SET_CMC_TBL_VI_LFTIME_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(21, 19)); \ - le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VI_LFTIME_SEL, \ - GENMASK(21, 19)); \ -} while (0) +static inline void SET_CMC_TBL_VI_LFTIME_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(21, 19)); + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VI_LFTIME_SEL, + GENMASK(21, 19)); +} #define SET_CMC_TBL_MASK_BE_LFTIME_SEL GENMASK(2, 0) -#define SET_CMC_TBL_BE_LFTIME_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(24, 22)); \ - le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BE_LFTIME_SEL, \ - GENMASK(24, 22)); \ -} while (0) +static inline void SET_CMC_TBL_BE_LFTIME_SEL(void *table, u32 val) +{ + 
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(24, 22)); + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BE_LFTIME_SEL, + GENMASK(24, 22)); +} #define SET_CMC_TBL_MASK_BK_LFTIME_SEL GENMASK(2, 0) -#define SET_CMC_TBL_BK_LFTIME_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 25)); \ - le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BK_LFTIME_SEL, \ - GENMASK(27, 25)); \ -} while (0) +static inline void SET_CMC_TBL_BK_LFTIME_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 25)); + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BK_LFTIME_SEL, + GENMASK(27, 25)); +} #define SET_CMC_TBL_MASK_SECTYPE GENMASK(3, 0) -#define SET_CMC_TBL_SECTYPE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 28)); \ - le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_SECTYPE, \ - GENMASK(31, 28)); \ -} while (0) +static inline void SET_CMC_TBL_SECTYPE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 28)); + le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_SECTYPE, + GENMASK(31, 28)); +} #define SET_CMC_TBL_MASK_MULTI_PORT_ID GENMASK(2, 0) -#define SET_CMC_TBL_MULTI_PORT_ID(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(2, 0)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MULTI_PORT_ID, \ - GENMASK(2, 0)); \ -} while (0) +static inline void SET_CMC_TBL_MULTI_PORT_ID(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(2, 0)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MULTI_PORT_ID, + GENMASK(2, 0)); +} #define SET_CMC_TBL_MASK_BMC BIT(0) -#define SET_CMC_TBL_BMC(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(3)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_BMC, \ - BIT(3)); \ -} while (0) +static inline void SET_CMC_TBL_BMC(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(3)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_BMC, + BIT(3)); +} #define SET_CMC_TBL_MASK_MBSSID GENMASK(3, 0) -#define SET_CMC_TBL_MBSSID(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 4)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MBSSID, \ - GENMASK(7, 4)); \ -} while (0) +static inline void SET_CMC_TBL_MBSSID(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 4)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MBSSID, + GENMASK(7, 4)); +} #define SET_CMC_TBL_MASK_NAVUSEHDR BIT(0) -#define SET_CMC_TBL_NAVUSEHDR(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_NAVUSEHDR, \ - BIT(8)); \ -} while (0) +static inline void SET_CMC_TBL_NAVUSEHDR(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_NAVUSEHDR, + BIT(8)); +} #define SET_CMC_TBL_MASK_TXPWR_MODE GENMASK(2, 0) -#define SET_CMC_TBL_TXPWR_MODE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(11, 9)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_TXPWR_MODE, \ - GENMASK(11, 9)); \ -} while (0) +static inline void SET_CMC_TBL_TXPWR_MODE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(11, 9)); + 
le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_TXPWR_MODE, + GENMASK(11, 9)); +} #define SET_CMC_TBL_MASK_DATA_DCM BIT(0) -#define SET_CMC_TBL_DATA_DCM(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(12)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_DCM, \ - BIT(12)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_DCM(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(12)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_DCM, + BIT(12)); +} #define SET_CMC_TBL_MASK_DATA_ER BIT(0) -#define SET_CMC_TBL_DATA_ER(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(13)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_ER, \ - BIT(13)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_ER(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(13)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_ER, + BIT(13)); +} #define SET_CMC_TBL_MASK_DATA_LDPC BIT(0) -#define SET_CMC_TBL_DATA_LDPC(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(14)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_LDPC, \ - BIT(14)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_LDPC(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(14)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_LDPC, + BIT(14)); +} #define SET_CMC_TBL_MASK_DATA_STBC BIT(0) -#define SET_CMC_TBL_DATA_STBC(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_STBC, \ - BIT(15)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_STBC(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_STBC, + BIT(15)); +} #define SET_CMC_TBL_MASK_A_CTRL_BQR BIT(0) -#define SET_CMC_TBL_A_CTRL_BQR(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(16)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BQR, \ - BIT(16)); \ -} while (0) +static inline void SET_CMC_TBL_A_CTRL_BQR(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(16)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BQR, + BIT(16)); +} #define SET_CMC_TBL_MASK_A_CTRL_UPH BIT(0) -#define SET_CMC_TBL_A_CTRL_UPH(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(17)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_UPH, \ - BIT(17)); \ -} while (0) +static inline void SET_CMC_TBL_A_CTRL_UPH(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(17)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_UPH, + BIT(17)); +} #define SET_CMC_TBL_MASK_A_CTRL_BSR BIT(0) -#define SET_CMC_TBL_A_CTRL_BSR(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(18)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BSR, \ - BIT(18)); \ -} while (0) +static inline void SET_CMC_TBL_A_CTRL_BSR(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(18)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BSR, + BIT(18)); +} #define SET_CMC_TBL_MASK_A_CTRL_CAS BIT(0) -#define SET_CMC_TBL_A_CTRL_CAS(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(19)); 
\ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_CAS, \ - BIT(19)); \ -} while (0) +static inline void SET_CMC_TBL_A_CTRL_CAS(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(19)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_CAS, + BIT(19)); +} #define SET_CMC_TBL_MASK_DATA_BW_ER BIT(0) -#define SET_CMC_TBL_DATA_BW_ER(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(20)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_BW_ER, \ - BIT(20)); \ -} while (0) +static inline void SET_CMC_TBL_DATA_BW_ER(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(20)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_BW_ER, + BIT(20)); +} #define SET_CMC_TBL_MASK_LSIG_TXOP_EN BIT(0) -#define SET_CMC_TBL_LSIG_TXOP_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(21)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_LSIG_TXOP_EN, \ - BIT(21)); \ -} while (0) +static inline void SET_CMC_TBL_LSIG_TXOP_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(21)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_LSIG_TXOP_EN, + BIT(21)); +} #define SET_CMC_TBL_MASK_CTRL_CNT_VLD BIT(0) -#define SET_CMC_TBL_CTRL_CNT_VLD(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(27)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT_VLD, \ - BIT(27)); \ -} while (0) +static inline void SET_CMC_TBL_CTRL_CNT_VLD(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(27)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT_VLD, + BIT(27)); +} #define SET_CMC_TBL_MASK_CTRL_CNT GENMASK(3, 0) -#define SET_CMC_TBL_CTRL_CNT(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 28)); \ - le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT, \ - GENMASK(31, 28)); \ -} while (0) +static inline void SET_CMC_TBL_CTRL_CNT(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 28)); + le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT, + GENMASK(31, 28)); +} #define SET_CMC_TBL_MASK_RESP_REF_RATE GENMASK(8, 0) -#define SET_CMC_TBL_RESP_REF_RATE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(8, 0)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_RESP_REF_RATE, \ - GENMASK(8, 0)); \ -} while (0) +static inline void SET_CMC_TBL_RESP_REF_RATE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(8, 0)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_RESP_REF_RATE, + GENMASK(8, 0)); +} #define SET_CMC_TBL_MASK_ALL_ACK_SUPPORT BIT(0) -#define SET_CMC_TBL_ALL_ACK_SUPPORT(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, BIT(12)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ALL_ACK_SUPPORT, \ - BIT(12)); \ -} while (0) +static inline void SET_CMC_TBL_ALL_ACK_SUPPORT(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(12)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ALL_ACK_SUPPORT, + BIT(12)); +} #define SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT BIT(0) -#define SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, BIT(13)); \ - le32p_replace_bits((__le32 *)(table) + 14, 
SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT, \ - BIT(13)); \ -} while (0) +static inline void SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(13)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT, + BIT(13)); +} #define SET_CMC_TBL_MASK_NTX_PATH_EN GENMASK(3, 0) -#define SET_CMC_TBL_NTX_PATH_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(19, 16)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_NTX_PATH_EN, \ - GENMASK(19, 16)); \ -} while (0) +static inline void SET_CMC_TBL_NTX_PATH_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(19, 16)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_NTX_PATH_EN, + GENMASK(19, 16)); +} #define SET_CMC_TBL_MASK_PATH_MAP_A GENMASK(1, 0) -#define SET_CMC_TBL_PATH_MAP_A(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(21, 20)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_A, \ - GENMASK(21, 20)); \ -} while (0) +static inline void SET_CMC_TBL_PATH_MAP_A(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(21, 20)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_A, + GENMASK(21, 20)); +} #define SET_CMC_TBL_MASK_PATH_MAP_B GENMASK(1, 0) -#define SET_CMC_TBL_PATH_MAP_B(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 22)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_B, \ - GENMASK(23, 22)); \ -} while (0) +static inline void SET_CMC_TBL_PATH_MAP_B(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 22)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_B, + GENMASK(23, 22)); +} #define SET_CMC_TBL_MASK_PATH_MAP_C GENMASK(1, 0) -#define SET_CMC_TBL_PATH_MAP_C(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(25, 24)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_C, \ - GENMASK(25, 24)); \ -} while (0) +static inline void SET_CMC_TBL_PATH_MAP_C(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(25, 24)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_C, + GENMASK(25, 24)); +} #define SET_CMC_TBL_MASK_PATH_MAP_D GENMASK(1, 0) -#define SET_CMC_TBL_PATH_MAP_D(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(27, 26)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_D, \ - GENMASK(27, 26)); \ -} while (0) +static inline void SET_CMC_TBL_PATH_MAP_D(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(27, 26)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_D, + GENMASK(27, 26)); +} #define SET_CMC_TBL_MASK_ANTSEL_A BIT(0) -#define SET_CMC_TBL_ANTSEL_A(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, BIT(28)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_A, \ - BIT(28)); \ -} while (0) +static inline void SET_CMC_TBL_ANTSEL_A(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(28)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_A, + BIT(28)); +} #define SET_CMC_TBL_MASK_ANTSEL_B BIT(0) -#define SET_CMC_TBL_ANTSEL_B(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, BIT(29)); \ - le32p_replace_bits((__le32 *)(table) + 
14, SET_CMC_TBL_MASK_ANTSEL_B, \ - BIT(29)); \ -} while (0) +static inline void SET_CMC_TBL_ANTSEL_B(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(29)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_B, + BIT(29)); +} #define SET_CMC_TBL_MASK_ANTSEL_C BIT(0) -#define SET_CMC_TBL_ANTSEL_C(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, BIT(30)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_C, \ - BIT(30)); \ -} while (0) +static inline void SET_CMC_TBL_ANTSEL_C(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(30)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_C, + BIT(30)); +} #define SET_CMC_TBL_MASK_ANTSEL_D BIT(0) -#define SET_CMC_TBL_ANTSEL_D(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 6, val, BIT(31)); \ - le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D, \ - BIT(31)); \ -} while (0) +static inline void SET_CMC_TBL_ANTSEL_D(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, BIT(31)); + le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D, + BIT(31)); +} #define SET_CMC_TBL_MASK_ADDR_CAM_INDEX GENMASK(7, 0) -#define SET_CMC_TBL_ADDR_CAM_INDEX(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ADDR_CAM_INDEX, \ - GENMASK(7, 0)); \ -} while (0) +static inline void SET_CMC_TBL_ADDR_CAM_INDEX(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ADDR_CAM_INDEX, + GENMASK(7, 0)); +} #define SET_CMC_TBL_MASK_PAID GENMASK(8, 0) -#define SET_CMC_TBL_PAID(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(16, 8)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_PAID, \ - GENMASK(16, 8)); \ -} while (0) +static inline void SET_CMC_TBL_PAID(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(16, 8)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_PAID, + GENMASK(16, 8)); +} #define SET_CMC_TBL_MASK_ULDL BIT(0) -#define SET_CMC_TBL_ULDL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, BIT(17)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ULDL, \ - BIT(17)); \ -} while (0) +static inline void SET_CMC_TBL_ULDL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, BIT(17)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ULDL, + BIT(17)); +} #define SET_CMC_TBL_MASK_DOPPLER_CTRL GENMASK(1, 0) -#define SET_CMC_TBL_DOPPLER_CTRL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(19, 18)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL, \ - GENMASK(19, 18)); \ -} while (0) +static inline void SET_CMC_TBL_DOPPLER_CTRL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(19, 18)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL, + GENMASK(19, 18)); +} #define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0) -#define SET_CMC_TBL_NOMINAL_PKT_PADDING(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \ - GENMASK(21, 20)); \ -} while (0) -#define SET_CMC_TBL_NOMINAL_PKT_PADDING40(table, val) 
\ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 22)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \ - GENMASK(23, 22)); \ -} while (0) +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(21, 20)); +} + +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING40(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 22)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(23, 22)); +} #define SET_CMC_TBL_MASK_TXPWR_TOLERENCE GENMASK(3, 0) -#define SET_CMC_TBL_TXPWR_TOLERENCE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(27, 24)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_TXPWR_TOLERENCE, \ - GENMASK(27, 24)); \ -} while (0) -#define SET_CMC_TBL_NOMINAL_PKT_PADDING80(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 30)); \ - le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \ - GENMASK(31, 30)); \ -} while (0) +static inline void SET_CMC_TBL_TXPWR_TOLERENCE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(27, 24)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_TXPWR_TOLERENCE, + GENMASK(27, 24)); +} + +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING80(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 30)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(31, 30)); +} #define SET_CMC_TBL_MASK_NC GENMASK(2, 0) -#define SET_CMC_TBL_NC(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(2, 0)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NC, \ - GENMASK(2, 0)); \ -} while (0) +static inline void SET_CMC_TBL_NC(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(2, 0)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NC, + GENMASK(2, 0)); +} #define SET_CMC_TBL_MASK_NR GENMASK(2, 0) -#define SET_CMC_TBL_NR(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(5, 3)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NR, \ - GENMASK(5, 3)); \ -} while (0) +static inline void SET_CMC_TBL_NR(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(5, 3)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NR, + GENMASK(5, 3)); +} #define SET_CMC_TBL_MASK_NG GENMASK(1, 0) -#define SET_CMC_TBL_NG(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(7, 6)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NG, \ - GENMASK(7, 6)); \ -} while (0) +static inline void SET_CMC_TBL_NG(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(7, 6)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NG, + GENMASK(7, 6)); +} #define SET_CMC_TBL_MASK_CB GENMASK(1, 0) -#define SET_CMC_TBL_CB(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(9, 8)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CB, \ - GENMASK(9, 8)); \ -} while (0) +static inline void SET_CMC_TBL_CB(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(9, 8)); + 
le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CB, + GENMASK(9, 8)); +} #define SET_CMC_TBL_MASK_CS GENMASK(1, 0) -#define SET_CMC_TBL_CS(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(11, 10)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CS, \ - GENMASK(11, 10)); \ -} while (0) +static inline void SET_CMC_TBL_CS(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(11, 10)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CS, + GENMASK(11, 10)); +} #define SET_CMC_TBL_MASK_CSI_TXBF_EN BIT(0) -#define SET_CMC_TBL_CSI_TXBF_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, BIT(12)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_TXBF_EN, \ - BIT(12)); \ -} while (0) +static inline void SET_CMC_TBL_CSI_TXBF_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(12)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_TXBF_EN, + BIT(12)); +} #define SET_CMC_TBL_MASK_CSI_STBC_EN BIT(0) -#define SET_CMC_TBL_CSI_STBC_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, BIT(13)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_STBC_EN, \ - BIT(13)); \ -} while (0) +static inline void SET_CMC_TBL_CSI_STBC_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(13)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_STBC_EN, + BIT(13)); +} #define SET_CMC_TBL_MASK_CSI_LDPC_EN BIT(0) -#define SET_CMC_TBL_CSI_LDPC_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, BIT(14)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_LDPC_EN, \ - BIT(14)); \ -} while (0) +static inline void SET_CMC_TBL_CSI_LDPC_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(14)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_LDPC_EN, + BIT(14)); +} #define SET_CMC_TBL_MASK_CSI_PARA_EN BIT(0) -#define SET_CMC_TBL_CSI_PARA_EN(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, BIT(15)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_PARA_EN, \ - BIT(15)); \ -} while (0) +static inline void SET_CMC_TBL_CSI_PARA_EN(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(15)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_PARA_EN, + BIT(15)); +} #define SET_CMC_TBL_MASK_CSI_FIX_RATE GENMASK(8, 0) -#define SET_CMC_TBL_CSI_FIX_RATE(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(24, 16)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_FIX_RATE, \ - GENMASK(24, 16)); \ -} while (0) +static inline void SET_CMC_TBL_CSI_FIX_RATE(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(24, 16)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_FIX_RATE, + GENMASK(24, 16)); +} #define SET_CMC_TBL_MASK_CSI_GI_LTF GENMASK(2, 0) -#define SET_CMC_TBL_CSI_GI_LTF(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(27, 25)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF, \ - GENMASK(27, 25)); \ -} while (0) +static inline void SET_CMC_TBL_CSI_GI_LTF(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(27, 25)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF, + GENMASK(27, 25)); +} #define 
SET_CMC_TBL_MASK_CSI_GID_SEL BIT(0) -#define SET_CMC_TBL_CSI_GID_SEL(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL, \ - BIT(29)); \ -} while (0) +static inline void SET_CMC_TBL_CSI_GID_SEL(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL, + BIT(29)); +} #define SET_CMC_TBL_MASK_CSI_BW GENMASK(1, 0) -#define SET_CMC_TBL_CSI_BW(table, val) \ -do { \ - le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(31, 30)); \ - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_BW, \ - GENMASK(31, 30)); \ -} while (0) - -#define SET_FWROLE_MAINTAIN_MACID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) -#define SET_FWROLE_MAINTAIN_SELF_ROLE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8)) -#define SET_FWROLE_MAINTAIN_UPD_MODE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10)) -#define SET_FWROLE_MAINTAIN_WIFI_ROLE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13)) - -#define SET_JOININFO_MACID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) -#define SET_JOININFO_OP(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, BIT(8)) -#define SET_JOININFO_BAND(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, BIT(9)) -#define SET_JOININFO_WMM(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10)) -#define SET_JOININFO_TGR(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, BIT(12)) -#define SET_JOININFO_ISHESTA(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, BIT(13)) -#define SET_JOININFO_DLBW(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14)) -#define SET_JOININFO_TF_MAC_PAD(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16)) -#define SET_JOININFO_DL_T_PE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18)) -#define SET_JOININFO_PORT_ID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21)) -#define SET_JOININFO_NET_TYPE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24)) -#define SET_JOININFO_WIFI_ROLE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26)) -#define SET_JOININFO_SELF_ROLE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30)) - -#define SET_GENERAL_PKT_MACID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) -#define SET_GENERAL_PKT_PROBRSP_ID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) -#define SET_GENERAL_PKT_PSPOLL_ID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16)) -#define SET_GENERAL_PKT_NULL_ID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24)) -#define SET_GENERAL_PKT_QOS_NULL_ID(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0)) -#define SET_GENERAL_PKT_CTS2SELF_ID(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8)) - -#define SET_LOG_CFG_LEVEL(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) -#define SET_LOG_CFG_PATH(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) -#define SET_LOG_CFG_COMP(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0)) -#define SET_LOG_CFG_COMP_EXT(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0)) - -#define SET_BA_CAM_VALID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, BIT(0)) 
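Nearly all of the fw.h churn in this commit is one mechanical transformation applied to the RA/SEC/EDCA/CMC/H2C setters alike: a #define wrapping le32p_replace_bits() (or a do { ... } while (0) pair of them) becomes a static inline void taking a void * buffer. Behaviour is unchanged; the inline gives val a fixed type, evaluates each argument exactly once, and yields a named symbol for diagnostics, while the generated code is identical once inlined. A standalone sketch of the pattern, assuming a little-endian host and using invented names (put_le32_field, SET_EXAMPLE_MACID) rather than the kernel helpers:

#include <stdint.h>

/* Stand-in for the kernel's le32p_replace_bits(); a plain uint32_t
 * store suffices on the assumed little-endian host. */
static inline void put_le32_field(uint32_t *p, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);

	*p = (*p & ~mask) | ((val << shift) & mask);
}

/* Old style: textual expansion, no type given to 'val', and the
 * do/while(0) wrapper needed wherever two fields are written. */
#define SET_EXAMPLE_MACID_OLD(h2c, val) \
	put_le32_field((uint32_t *)(h2c), val, 0x000000ff)

/* New style, as in this patch: same body, but now a real function. */
static inline void SET_EXAMPLE_MACID(void *h2c, uint32_t val)
{
	put_le32_field((uint32_t *)h2c, val, 0x000000ff); /* GENMASK(7, 0) */
}

int main(void)
{
	uint32_t cmd[4] = { 0 };

	SET_EXAMPLE_MACID(cmd, 0x7f);
	return cmd[0] == 0x7f ? 0 : 1;
}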
-#define SET_BA_CAM_INIT_REQ(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, BIT(1)) -#define SET_BA_CAM_ENTRY_IDX(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2)) -#define SET_BA_CAM_TID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4)) -#define SET_BA_CAM_MACID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) -#define SET_BA_CAM_BMAP_SIZE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16)) -#define SET_BA_CAM_SSN(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20)) - -#define SET_LPS_PARM_MACID(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)) -#define SET_LPS_PARM_PSMODE(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)) -#define SET_LPS_PARM_RLBM(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16)) -#define SET_LPS_PARM_SMARTPS(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 20)) -#define SET_LPS_PARM_AWAKEINTERVAL(h2c, val) \ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24)) -#define SET_LPS_PARM_VOUAPSD(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(0)) -#define SET_LPS_PARM_VIUAPSD(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(1)) -#define SET_LPS_PARM_BEUAPSD(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(2)) -#define SET_LPS_PARM_BKUAPSD(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(3)) -#define SET_LPS_PARM_LASTRPWM(h2c, val) \ - le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8)) +static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(31, 30)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_BW, + GENMASK(31, 30)); +} + +static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)); +} + +static inline void SET_FWROLE_MAINTAIN_SELF_ROLE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8)); +} + +static inline void SET_FWROLE_MAINTAIN_UPD_MODE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10)); +} + +static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13)); +} + +static inline void SET_JOININFO_MACID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)); +} + +static inline void SET_JOININFO_OP(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, BIT(8)); +} + +static inline void SET_JOININFO_BAND(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, BIT(9)); +} + +static inline void SET_JOININFO_WMM(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10)); +} + +static inline void SET_JOININFO_TGR(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, BIT(12)); +} + +static inline void SET_JOININFO_ISHESTA(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, BIT(13)); +} + +static inline void SET_JOININFO_DLBW(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14)); +} + +static inline void SET_JOININFO_TF_MAC_PAD(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16)); +} + +static inline void SET_JOININFO_DL_T_PE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18)); +} + +static inline void SET_JOININFO_PORT_ID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21)); +} + 
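For callers the converted helpers read exactly as before: the driver zeroes an H2C payload and invokes one setter per field, each helper hiding the dword offset and bit mask. A hedged usage sketch; set_joininfo_macid()/set_joininfo_op() are local stand-ins mirroring the SET_JOININFO_MACID/SET_JOININFO_OP shapes above, not the driver functions:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Little-endian field store, standing in for le32p_replace_bits(). */
static inline void put_field(uint32_t *p, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);

	*p = (*p & ~mask) | ((val << shift) & mask);
}

static inline void set_joininfo_macid(void *h2c, uint32_t val)
{
	put_field((uint32_t *)h2c, val, 0x000000ff);  /* GENMASK(7, 0) */
}

static inline void set_joininfo_op(void *h2c, uint32_t val)
{
	put_field((uint32_t *)h2c, val, 0x00000100);  /* BIT(8) */
}

int main(void)
{
	uint32_t h2c[2];

	memset(h2c, 0, sizeof(h2c));    /* drivers zero the skb payload first */
	set_joininfo_macid(h2c, 0x2a);  /* bits 7:0 */
	set_joininfo_op(h2c, 1);        /* bit 8    */
	printf("dword0 = 0x%08x\n", h2c[0]);  /* prints 0x0000012a */
	return 0;
}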
+static inline void SET_JOININFO_NET_TYPE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24)); +} + +static inline void SET_JOININFO_WIFI_ROLE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26)); +} + +static inline void SET_JOININFO_SELF_ROLE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30)); +} + +static inline void SET_GENERAL_PKT_MACID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)); +} + +static inline void SET_GENERAL_PKT_PROBRSP_ID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)); +} + +static inline void SET_GENERAL_PKT_PSPOLL_ID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16)); +} + +static inline void SET_GENERAL_PKT_NULL_ID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24)); +} + +static inline void SET_GENERAL_PKT_QOS_NULL_ID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0)); +} + +static inline void SET_GENERAL_PKT_CTS2SELF_ID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8)); +} + +static inline void SET_LOG_CFG_LEVEL(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)); +} + +static inline void SET_LOG_CFG_PATH(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)); +} + +static inline void SET_LOG_CFG_COMP(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0)); +} + +static inline void SET_LOG_CFG_COMP_EXT(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0)); +} + +static inline void SET_BA_CAM_VALID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, BIT(0)); +} + +static inline void SET_BA_CAM_INIT_REQ(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, BIT(1)); +} + +static inline void SET_BA_CAM_ENTRY_IDX(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2)); +} + +static inline void SET_BA_CAM_TID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4)); +} + +static inline void SET_BA_CAM_MACID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)); +} + +static inline void SET_BA_CAM_BMAP_SIZE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16)); +} + +static inline void SET_BA_CAM_SSN(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20)); +} + +static inline void SET_LPS_PARM_MACID(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)); +} + +static inline void SET_LPS_PARM_PSMODE(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)); +} + +static inline void SET_LPS_PARM_RLBM(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16)); +} + +static inline void SET_LPS_PARM_SMARTPS(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 20)); +} + +static inline void SET_LPS_PARM_AWAKEINTERVAL(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24)); +} + +static inline void SET_LPS_PARM_VOUAPSD(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(0)); +} + +static inline void SET_LPS_PARM_VIUAPSD(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(1)); +} + +static inline void SET_LPS_PARM_BEUAPSD(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, 
BIT(2)); +} + +static inline void SET_LPS_PARM_BKUAPSD(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(3)); +} + +static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val) +{ + le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8)); +} enum rtw89_btc_btf_h2c_class { BTFC_SET = 0x10, @@ -1052,165 +1316,322 @@ enum rtw89_btc_cxdrvinfo { CXDRVINFO_MAX, }; -#define RTW89_SET_FWCMD_CXHDR_TYPE(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXHDR_LEN(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 1, val, GENMASK(7, 0)) - -#define RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 4, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(0)) -#define RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(1)) -#define RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 6, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 7, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(0)) -#define RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(1)) -#define RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(2)) -#define RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 10, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(0)) -#define RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(1)) -#define RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(2)) -#define RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(3)) -#define RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(4)) - -#define RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, val) \ - u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(0)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(1)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(2)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(3)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(4)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(5)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(6)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(7)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 
4), val, BIT(8)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(9)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(10)) -#define RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, val) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11)) -#define RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0)) -#define RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1)) -#define RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4)) -#define RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5)) -#define RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6)) -#define RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0)) -#define RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1)) -#define RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, val, n) \ - u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0)) -#define RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, val, n) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0)) -#define RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, val, n) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0)) -#define RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, val, n) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0)) -#define RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, val, n) \ - le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0)) - -#define RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(0)) -#define RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(1)) -#define RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(2)) -#define RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(18, 3)) - -#define RTW89_SET_FWCMD_CXRFK_STATE(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(1, 0)) -#define RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(5, 2)) -#define RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(7, 6)) -#define RTW89_SET_FWCMD_CXRFK_BAND(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(9, 8)) -#define RTW89_SET_FWCMD_CXRFK_TYPE(cmd, val) \ - le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10)) +static inline void RTW89_SET_FWCMD_CXHDR_TYPE(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXHDR_LEN(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 1, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_ANT_TYPE(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0)); +} + +static inline void 
RTW89_SET_FWCMD_CXINIT_ANT_NUM(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_ANT_ISO(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 4, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_ANT_POS(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(1)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_MOD_RFE(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 6, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_MOD_CV(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 7, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(1)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(2)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_WL_GCH(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 10, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_WL_ONLY(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_WL_INITOK(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(1)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_DBCC_EN(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(2)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_CX_OTHER(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(3)); +} + +static inline void RTW89_SET_FWCMD_CXINIT_BT_ONLY(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(4)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_LINK_MODE(void *cmd, u8 val) +{ + u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_NONE(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_STA(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(1)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_AP(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(2)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_VAP(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(3)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(4)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(5)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_MESH(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(6)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(7)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(8)); +} + 
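Taken together, the CXROLE setters in this hunk encode a fixed wire layout: a 2-byte CXHDR, connect count at byte 2, link mode at byte 3, a 16-bit role bitmap at byte 4, then 12 bytes per active-role slot, which is where the 6 + 12 * (n) offsets in the ACT_* accessors come from (removed above as macros, re-added below as inlines). A standalone sketch of that offset math (constants and function name are illustrative; the offsets mirror the diff):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CXROLE_FIXED_LEN	6	/* header region before the role slots */
#define CXROLE_SLOT_LEN		12	/* bytes per active-role slot */

static void cxrole_set_act_ch(uint8_t *cmd, uint8_t ch, int n)
{
	/* Same offset as RTW89_SET_FWCMD_CXROLE_ACT_CH: 9 + 12 * n. */
	cmd[CXROLE_FIXED_LEN + CXROLE_SLOT_LEN * n + 3] = ch;
}

int main(void)
{
	uint8_t cmd[CXROLE_FIXED_LEN + 4 * CXROLE_SLOT_LEN];

	memset(cmd, 0, sizeof(cmd));
	cxrole_set_act_ch(cmd, 36, 2);		/* role slot 2, channel 36 */
	printf("cmd[%d] = %u\n", 9 + 12 * 2, cmd[9 + 12 * 2]);
	return 0;
}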
+static inline void RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(9)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(10)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ROLE_NAN(void *cmd, u16 val) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n) +{ + u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0)); +} + +static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n) +{ + le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0)); +} + +static inline void RTW89_SET_FWCMD_CXCTRL_MANUAL(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(0)); +} + +static inline void RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(1)); +} + +static inline void RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(2)); +} + +static inline void RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(18, 3)); +} + +static inline void RTW89_SET_FWCMD_CXRFK_STATE(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(1, 0)); +} + +static inline void RTW89_SET_FWCMD_CXRFK_PATH_MAP(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(5, 2)); +} + +static inline void RTW89_SET_FWCMD_CXRFK_PHY_MAP(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(7, 6)); 
+} + +static inline void RTW89_SET_FWCMD_CXRFK_BAND(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(9, 8)); +} + +static inline void RTW89_SET_FWCMD_CXRFK_TYPE(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10)); +} #define RTW89_C2H_HEADER_LEN 8 #define RTW89_GET_C2H_CATEGORY(c2h) \ - le32_get_bits(*((__le32 *)c2h), GENMASK(1, 0)) + le32_get_bits(*((const __le32 *)c2h), GENMASK(1, 0)) #define RTW89_GET_C2H_CLASS(c2h) \ - le32_get_bits(*((__le32 *)c2h), GENMASK(7, 2)) + le32_get_bits(*((const __le32 *)c2h), GENMASK(7, 2)) #define RTW89_GET_C2H_FUNC(c2h) \ - le32_get_bits(*((__le32 *)c2h), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)c2h), GENMASK(15, 8)) #define RTW89_GET_C2H_LEN(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 1), GENMASK(13, 0)) + le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(13, 0)) #define RTW89_GET_C2H_LOG_SRT_PRT(c2h) (char *)((__le32 *)(c2h) + 2) #define RTW89_GET_C2H_LOG_LEN(len) ((len) - RTW89_C2H_HEADER_LEN) #define RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0)) #define RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2)) #define RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8)) #define RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16)) #define RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(31, 24)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24)) #define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0)) #define RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2)) #define RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8)) #define RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16)) #define RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 0)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 0)) #define RTW89_GET_PHY_C2H_RA_RPT_RETRY_RATIO(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16)) + le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16)) #define RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(6, 0)) + le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(6, 0)) #define RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(9, 8)) + le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(9, 8)) #define RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(12, 10)) + le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(12, 10)) #define RTW89_GET_PHY_C2H_RA_RPT_BW(c2h) \ - le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(14, 13)) + le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(14, 13)) /* VHT, HE, HT-old: [6:4]: NSS, 
[3:0]: MCS * HT-new: [6:5]: NA, [4:0]: MCS @@ -1335,7 +1756,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta); -int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); +int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif, + struct rtw89_sta *rtwsta, const u8 *scan_mac_addr); void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h); void rtw89_fw_c2h_work(struct work_struct *work); int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index afcd07ab1de7..b98c47e9ecfe 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1093,7 +1093,6 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) static int dmac_func_en(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret = 0; val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN | @@ -1107,7 +1106,7 @@ static int dmac_func_en(struct rtw89_dev *rtwdev) B_AX_WD_RLS_CLK_EN); rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32); - return ret; + return 0; } static int chip_func_en(struct rtw89_dev *rtwdev) @@ -2991,7 +2990,7 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) if (ret) return ret; - ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); if (ret) return ret; @@ -3012,7 +3011,7 @@ int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_cam_deinit(rtwdev, rtwvif); - ret = rtw89_fw_h2c_cam(rtwdev, rtwvif); + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); if (ret) return ret; @@ -3451,6 +3450,18 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val); } +u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band) +{ + u32 reg; + u16 cnt; + + reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, band); + cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK); + rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST); + + return cnt; +} + static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) { u32 reg; @@ -3695,7 +3706,7 @@ void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) { struct rtw89_traffic_stats *stats = &rtwdev->stats; struct rtw89_vif *rtwvif; - bool en = stats->tx_tfc_lv > stats->rx_tfc_lv ? 
false : true; + bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv; bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); if (en == old) diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 6f3db8a2a9c2..b7d13edf7dd1 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -227,6 +227,7 @@ enum rtw89_mac_dbg_port_sel { /* SRAM mem dump */ #define R_AX_INDIR_ACCESS_ENTRY 0x40000 +#define AXIDMA_BASE_ADDR 0x18006000 #define STA_SCHED_BASE_ADDR 0x18808000 #define RXPLD_FLTR_CAM_BASE_ADDR 0x18813000 #define SECURITY_CAM_BASE_ADDR 0x18814000 @@ -240,10 +241,15 @@ enum rtw89_mac_dbg_port_sel { #define DMAC_TBL_BASE_ADDR 0x18800000 #define SHCUT_MACHDR_BASE_ADDR 0x18800800 #define BCN_IE_CAM1_BASE_ADDR 0x188A0000 +#define TXD_FIFO_0_BASE_ADDR 0x18856200 +#define TXD_FIFO_1_BASE_ADDR 0x188A1080 +#define TXDATA_FIFO_0_BASE_ADDR 0x18856000 +#define TXDATA_FIFO_1_BASE_ADDR 0x188A1000 #define CCTL_INFO_SIZE 32 enum rtw89_mac_mem_sel { + RTW89_MAC_MEM_AXIDMA, RTW89_MAC_MEM_SHARED_BUF, RTW89_MAC_MEM_DMAC_TBL, RTW89_MAC_MEM_SHCUT_MACHDR, @@ -256,6 +262,10 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_BA_CAM, RTW89_MAC_MEM_BCN_IE_CAM0, RTW89_MAC_MEM_BCN_IE_CAM1, + RTW89_MAC_MEM_TXD_FIFO_0, + RTW89_MAC_MEM_TXD_FIFO_1, + RTW89_MAC_MEM_TXDATA_FIFO_0, + RTW89_MAC_MEM_TXDATA_FIFO_1, /* keep last */ RTW89_MAC_MEM_LAST, @@ -778,6 +788,7 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex_gnt *gnt_cfg); int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt); +u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band); void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val); u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev); bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 16dc6fb7dbb0..a322259f4cc4 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -27,6 +27,7 @@ static void rtw89_ops_tx(struct ieee80211_hw *hw, if (ret) { rtw89_err(rtwdev, "failed to transmit skb: %d\n", ret); ieee80211_free_txskb(hw, skb); + return; } rtw89_core_tx_kick_off(rtwdev, qsel); } @@ -336,7 +337,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID) { ether_addr_copy(rtwvif->bssid, conf->bssid); rtw89_cam_bssid_changed(rtwdev, rtwvif); - rtw89_fw_h2c_cam(rtwdev, rtwvif); + rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); } if (changed & BSS_CHANGED_ERP_SLOT) @@ -615,6 +616,7 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw, const u8 *mac_addr) { struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct rtw89_hal *hal = &rtwdev->hal; mutex_lock(&rtwdev->mutex); @@ -623,6 +625,7 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw, rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type); rtw89_chip_rfk_scan(rtwdev, true); rtw89_hci_recalc_int_mit(rtwdev); + rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr); mutex_unlock(&rtwdev->mutex); } @@ -630,8 +633,10 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; mutex_lock(&rtwdev->mutex); + rtw89_fw_h2c_cam(rtwdev, 
rtwvif, NULL, NULL); rtw89_chip_rfk_scan(rtwdev, false); rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0); rtwdev->scanning = false; diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index ab134856baac..147009888de0 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -654,6 +654,12 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev, u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE; u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE; + if (page >= RTW89_H2C_RF_PAGE_NUM) { + rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d", + rf_path, info->curr_idx); + return; + } + info->rtw89_phy_config_rf_h2c[page][idx] = cpu_to_le32((reg->addr << 20) | reg->data); info->curr_idx++; @@ -662,30 +668,29 @@ rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev, static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev, struct rtw89_fw_h2c_rf_reg_info *info) { - u16 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE; - u16 len = (info->curr_idx % RTW89_H2C_RF_PAGE_SIZE) * 4; + u16 remain = info->curr_idx; + u16 len = 0; u8 i; int ret = 0; - if (page > RTW89_H2C_RF_PAGE_NUM) { + if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) { rtw89_warn(rtwdev, - "rf reg h2c total page num %d larger than %d (RTW89_H2C_RF_PAGE_NUM)\n", - page, RTW89_H2C_RF_PAGE_NUM); - return -EINVAL; + "rf reg h2c total len %d larger than %d\n", + remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE); + ret = -EINVAL; + goto out; } - for (i = 0; i < page; i++) { - ret = rtw89_fw_h2c_rf_reg(rtwdev, info, - RTW89_H2C_RF_PAGE_SIZE * 4, i); + for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) { + len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain; + ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i); if (ret) - return ret; + goto out; } - ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len, i); - if (ret) - return ret; +out: info->curr_idx = 0; - return 0; + return ret; } static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev, @@ -1099,9 +1104,15 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, switch (band) { case RTW89_BAND_2G: lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf][regd][ch_idx]; + if (!lmt) + lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf] + [RTW89_WW][ch_idx]; break; case RTW89_BAND_5G: lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf][regd][ch_idx]; + if (!lmt) + lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf] + [RTW89_WW][ch_idx]; break; default: rtw89_warn(rtwdev, "unknown band type: %d\n", band); @@ -1224,9 +1235,15 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, switch (band) { case RTW89_BAND_2G: lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx][regd][ch_idx]; + if (!lmt_ru) + lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx] + [RTW89_WW][ch_idx]; break; case RTW89_BAND_5G: lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx][regd][ch_idx]; + if (!lmt_ru) + lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx] + [RTW89_WW][ch_idx]; break; default: rtw89_warn(rtwdev, "unknown band type: %d\n", band); @@ -1767,7 +1784,7 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) } rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo); cfo->cfo_avg_pre = new_cfo; - x_cap_update = cfo->crystal_cap == pre_x_cap ? 
false : true; + x_cap_update = cfo->crystal_cap != pre_x_cap; rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update); rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n", cfo->def_x_cap, pre_x_cap, cfo->crystal_cap, @@ -2404,6 +2421,116 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev) env->ccx_watchdog_result, chk_result); } +static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page) +{ + if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM || + *ie_page == RTW89_RSVD_9) + return false; + else if (*ie_page > RTW89_RSVD_9) + *ie_page -= 1; + + return true; +} + +static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page) +{ + static const u8 ie_page_shift = 2; + + return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift); +} + +static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev, + enum rtw89_phy_status_bitmap ie_page) +{ + u32 addr; + + if (!rtw89_physts_ie_page_valid(&ie_page)) + return 0; + + addr = rtw89_phy_get_ie_bitmap_addr(ie_page); + + return rtw89_phy_read32(rtwdev, addr); +} + +static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev, + enum rtw89_phy_status_bitmap ie_page, + u32 val) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + u32 addr; + + if (!rtw89_physts_ie_page_valid(&ie_page)) + return; + + if (chip->chip_id == RTL8852A) + val &= B_PHY_STS_BITMAP_MSK_52A; + + addr = rtw89_phy_get_ie_bitmap_addr(ie_page); + rtw89_phy_write32(rtwdev, addr, val); +} + +static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev, + enum rtw89_phy_status_bitmap bitmap, + enum rtw89_phy_status_ie_type ie, + bool enable) +{ + u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap); + + if (enable) + val |= BIT(ie); + else + val &= ~BIT(ie); + + rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val); +} + +static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev, + bool enable, + enum rtw89_phy_idx phy_idx) +{ + if (enable) { + rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM, + B_STS_DIS_TRIG_BY_FAIL); + rtw89_phy_write32_clr(rtwdev, R_PLCP_HISTOGRAM, + B_STS_DIS_TRIG_BY_BRK); + } else { + rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, + B_STS_DIS_TRIG_BY_FAIL); + rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, + B_STS_DIS_TRIG_BY_BRK); + } +} + +static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + u8 i; + + if (chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) + rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0); + + for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) { + if (i >= RTW89_CCK_PKT) + rtw89_physts_enable_ie_bitmap(rtwdev, i, + RTW89_PHYSTS_IE09_FTR_0, + true); + if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) || + (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT)) + continue; + rtw89_physts_enable_ie_bitmap(rtwdev, i, + RTW89_PHYSTS_IE24_OFDM_TD_PATH_A, + true); + } + rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT, + RTW89_PHYSTS_IE13_DL_MU_DEF, true); + rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT, + RTW89_PHYSTS_IE13_DL_MU_DEF, true); + + /* force IE01 for channel index, only channel field is valid */ + rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT, + RTW89_PHYSTS_IE01_CMN_OFDM, true); +} + static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -2839,6 +2966,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) rtw89_chip_bb_sethw(rtwdev); rtw89_phy_env_monitor_init(rtwdev); + rtw89_physts_parsing_init(rtwdev);
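The physts helpers added above map one 32-bit IE-enable bitmap register to each PHY status packet type, with one wrinkle: RTW89_RSVD_9 has no backing register, so enum values above it shift down by one before the register address is formed from R_PHY_STS_BITMAP_ADDR_START. A host-side model of the page-to-address mapping (register value taken from the reg.h hunk below; the function itself is illustrative):

#include <stdio.h>

#define R_PHY_STS_BITMAP_ADDR_START	0x073C	/* R_PHY_STS_BITMAP_SEARCH_FAIL */
#define RTW89_RSVD_9			9
#define RTW89_PHYSTS_BITMAP_NUM		16

static int ie_page_to_addr(int ie_page, unsigned int *addr)
{
	if (ie_page >= RTW89_PHYSTS_BITMAP_NUM || ie_page == RTW89_RSVD_9)
		return -1;			/* no backing register */
	if (ie_page > RTW89_RSVD_9)
		ie_page -= 1;			/* close the gap left by RSVD_9 */
	*addr = R_PHY_STS_BITMAP_ADDR_START + (ie_page << 2);
	return 0;
}

int main(void)
{
	unsigned int addr;

	if (!ie_page_to_addr(11 /* RTW89_CCK_PKT */, &addr))
		printf("0x%04X\n", addr);	/* 0x0764 = R_PHY_STS_BITMAP_CCK */
	return 0;
}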
rtw89_phy_dig_init(rtwdev); rtw89_phy_cfo_init(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index 370129345e0f..b1f059b725a1 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -134,6 +134,66 @@ enum rtw89_ccx_unit { RTW89_CCX_32_US = 3 }; +enum rtw89_phy_status_ie_type { + RTW89_PHYSTS_IE00_CMN_CCK = 0, + RTW89_PHYSTS_IE01_CMN_OFDM = 1, + RTW89_PHYSTS_IE02_CMN_EXT_AX = 2, + RTW89_PHYSTS_IE03_CMN_EXT_SEG_1 = 3, + RTW89_PHYSTS_IE04_CMN_EXT_PATH_A = 4, + RTW89_PHYSTS_IE05_CMN_EXT_PATH_B = 5, + RTW89_PHYSTS_IE06_CMN_EXT_PATH_C = 6, + RTW89_PHYSTS_IE07_CMN_EXT_PATH_D = 7, + RTW89_PHYSTS_IE08_FTR_CH = 8, + RTW89_PHYSTS_IE09_FTR_0 = 9, + RTW89_PHYSTS_IE10_FTR_PLCP_EXT = 10, + RTW89_PHYSTS_IE11_FTR_PLCP_HISTOGRAM = 11, + RTW89_PHYSTS_IE12_MU_EIGEN_INFO = 12, + RTW89_PHYSTS_IE13_DL_MU_DEF = 13, + RTW89_PHYSTS_IE14_TB_UL_CQI = 14, + RTW89_PHYSTS_IE15_TB_UL_DEF = 15, + RTW89_PHYSTS_IE16_RSVD16 = 16, + RTW89_PHYSTS_IE17_TB_UL_CTRL = 17, + RTW89_PHYSTS_IE18_DBG_OFDM_FD_CMN = 18, + RTW89_PHYSTS_IE19_DBG_OFDM_TD_CMN = 19, + RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0 = 20, + RTW89_PHYSTS_IE21_DBG_OFDM_FD_USER_SEG_1 = 21, + RTW89_PHYSTS_IE22_DBG_OFDM_FD_USER_AGC = 22, + RTW89_PHYSTS_IE23_RSVD23 = 23, + RTW89_PHYSTS_IE24_OFDM_TD_PATH_A = 24, + RTW89_PHYSTS_IE25_OFDM_TD_PATH_B = 25, + RTW89_PHYSTS_IE26_OFDM_TD_PATH_C = 26, + RTW89_PHYSTS_IE27_OFDM_TD_PATH_D = 27, + RTW89_PHYSTS_IE28_DBG_CCK_PATH_A = 28, + RTW89_PHYSTS_IE29_DBG_CCK_PATH_B = 29, + RTW89_PHYSTS_IE30_DBG_CCK_PATH_C = 30, + RTW89_PHYSTS_IE31_DBG_CCK_PATH_D = 31, + + /* keep last */ + RTW89_PHYSTS_IE_NUM, + RTW89_PHYSTS_IE_MAX = RTW89_PHYSTS_IE_NUM - 1 +}; + +enum rtw89_phy_status_bitmap { + RTW89_TD_SEARCH_FAIL = 0, + RTW89_BRK_BY_TX_PKT = 1, + RTW89_CCA_SPOOF = 2, + RTW89_OFDM_BRK = 3, + RTW89_CCK_BRK = 4, + RTW89_DL_MU_SPOOFING = 5, + RTW89_HE_MU = 6, + RTW89_VHT_MU = 7, + RTW89_UL_TB_SPOOFING = 8, + RTW89_RSVD_9 = 9, + RTW89_TRIG_BASE_PPDU = 10, + RTW89_CCK_PKT = 11, + RTW89_LEGACY_OFDM_PKT = 12, + RTW89_HT_PKT = 13, + RTW89_VHT_PKT = 14, + RTW89_HE_PKT = 15, + + RTW89_PHYSTS_BITMAP_NUM +}; + enum rtw89_dig_gain_type { RTW89_DIG_GAIN_LNA_G = 0, RTW89_DIG_GAIN_TIA_G = 1, diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 365d8c8ce57b..e0a416d37d0e 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -1658,7 +1658,7 @@ #define R_RSTB_WATCH_DOG 0x000C #define B_P0_RSTB_WATCH_DOG BIT(0) #define B_P1_RSTB_WATCH_DOG BIT(1) -#define B_UPD_P0_EN BIT(30) +#define B_UPD_P0_EN BIT(31) #define R_ANAPAR_PW15 0x030C #define B_ANAPAR_PW15 GENMASK(31, 24) #define B_ANAPAR_PW15_H GENMASK(27, 24) @@ -1674,6 +1674,29 @@ #define B_UPD_CLK_ADC_VAL GENMASK(26, 25) #define R_RSTB_ASYNC 0x0704 #define B_RSTB_ASYNC_ALL BIT(1) +#define R_MAC_PIN_SEL 0x0734 +#define B_CH_IDX_SEG0 GENMASK(23, 16) +#define R_PLCP_HISTOGRAM 0x0738 +#define B_STS_DIS_TRIG_BY_BRK BIT(2) +#define B_STS_DIS_TRIG_BY_FAIL BIT(3) +#define R_PHY_STS_BITMAP_ADDR_START R_PHY_STS_BITMAP_SEARCH_FAIL +#define B_PHY_STS_BITMAP_ADDR_MASK GENMASK(6, 2) +#define R_PHY_STS_BITMAP_SEARCH_FAIL 0x073C +#define B_PHY_STS_BITMAP_MSK_52A 0x337cff3f +#define R_PHY_STS_BITMAP_R2T 0x0740 +#define R_PHY_STS_BITMAP_CCA_SPOOF 0x0744 +#define R_PHY_STS_BITMAP_OFDM_BRK 0x0748 +#define R_PHY_STS_BITMAP_CCK_BRK 0x074C +#define R_PHY_STS_BITMAP_DL_MU_SPOOF 0x0750 +#define R_PHY_STS_BITMAP_HE_MU 0x0754 +#define 
R_PHY_STS_BITMAP_VHT_MU 0x0758 +#define R_PHY_STS_BITMAP_UL_TB_SPOOF 0x075C +#define R_PHY_STS_BITMAP_TRIGBASE 0x0760 +#define R_PHY_STS_BITMAP_CCK 0x0764 +#define R_PHY_STS_BITMAP_LEGACY 0x0768 +#define R_PHY_STS_BITMAP_HT 0x076C +#define R_PHY_STS_BITMAP_VHT 0x0770 +#define R_PHY_STS_BITMAP_HE 0x0774 #define R_PMAC_GNT 0x0980 #define B_PMAC_GNT_TXEN BIT(0) #define B_PMAC_GNT_RXEN BIT(16) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index f00b94ecfff4..4c37e590e43c 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -15,243 +15,244 @@ static const struct rtw89_regulatory rtw89_ww_regd = COUNTRY_REGD("00", RTW89_WW, RTW89_WW); static const struct rtw89_regulatory rtw89_regd_map[] = { - COUNTRY_REGD("AR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BO", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO), + COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CL", RTW89_WW, RTW89_CHILE), + COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE), COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SV", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HN", RTW89_WW, RTW89_FCC), - COUNTRY_REGD("MX", RTW89_FCC, RTW89_MEXICO), + COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO), COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UY", RTW89_WW, RTW89_FCC), - COUNTRY_REGD("VE", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CY", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("DK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("EE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("DE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("HU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LV", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MC", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ES", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GB", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BG", RTW89_WW, 
RTW89_ETSI), - COUNTRY_REGD("HR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("EG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IQ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("JO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LB", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("OM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("QA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ME", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ZA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("UA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("YE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ZW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("HK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GB", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KE", 
RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR), + COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE), + COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CN", RTW89_CN, RTW89_CN), + COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC), - COUNTRY_REGD("MY", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("VN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AU", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("NZ", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("PG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("CA", RTW89_IC, RTW89_IC), COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK), - COUNTRY_REGD("JM", RTW89_WW, RTW89_FCC), + COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TN", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DZ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("AQ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BY", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BY", 
RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BT", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BV", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("BI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CV", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CX", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("CC", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("CK", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DJ", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GQ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ER", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ET", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FK", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("FO", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("PF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GL", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GP", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GN", RTW89_WW, RTW89_ETSI), - 
COUNTRY_REGD("GW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GY", RTW89_FCC, RTW89_NCC), + COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC), COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HM", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("VA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("JE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("KI", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LA", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("LY", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MV", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ML", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MQ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("YT", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MD", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MN", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("MS", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NR", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NP", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NC", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("NU", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("NF", RTW89_WW, RTW89_ACMA), + COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA), COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("RE", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("RW", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SH", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC), COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SM", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("ST", RTW89_FCC, 
RTW89_FCC), COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SL", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SB", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("GS", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SJ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("SZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TJ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TG", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TK", RTW89_WW, RTW89_ACMA), - COUNTRY_REGD("TO", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("TC", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA), + COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("UG", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI), COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UZ", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("VU", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("WF", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("EH", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("ZM", RTW89_WW, RTW89_ETSI), - COUNTRY_REGD("IR", RTW89_WW, RTW89_ETSI), + COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI), }; static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 5c6ffca3a324..6b75e4bc7352 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -1053,10 +1053,10 @@ static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev, struct rtw89_channel_params *param, enum rtw89_phy_idx phy_idx) { - bool cck_en = param->center_chan > 14 ? false : true; + bool cck_en = param->center_chan <= 14; u8 pri_ch_idx = param->pri_ch_idx; - if (param->center_chan <= 14) + if (cck_en) rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan, param->primary_chan, param->bandwidth); @@ -1069,6 +1069,8 @@ static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev, rtw8852a_bbrst_for_rfk(rtwdev, phy_idx); } rtw8852a_spur_elimination(rtwdev, param->center_chan); + rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, + param->primary_chan); rtw8852a_bb_reset_all(rtwdev, phy_idx); } @@ -1927,6 +1929,21 @@ void rtw8852a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); } +static void rtw8852a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u16 chan = phy_ppdu->chan_idx; + u8 band; + + if (chan == 0) + return; + + band = chan <= 14 ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + status->freq = ieee80211_channel_to_frequency(chan, band); + status->band = band; +} + static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status) @@ -1939,6 +1956,8 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev, status->chains |= BIT(path); status->chain_signal[path] = rx_power[path]; } + if (phy_ppdu->valid) + rtw8852a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); } static const struct rtw89_chip_ops rtw8852a_chip_ops = { @@ -2012,7 +2031,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .limit_efuse_size = 1152, .phycap_addr = 0x580, .phycap_size = 128, - .para_ver = 0x05050764, + .para_ver = 0x05050864, .wlcx_desired = 0x05050000, .btcx_desired = 0x5, .scbd = 0x1, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c index 3a4fe7207420..253b5f8fc4f9 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c @@ -43384,5248 +43384,6991 @@ static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0][0][0] = 56, - [0][0][0][0][0][1] = 56, - [0][0][0][0][0][2] = 56, - [0][0][0][0][0][3] = 56, - [0][0][0][0][0][4] = 56, - [0][0][0][0][0][5] = 56, - [0][0][0][0][0][6] = 56, - [0][0][0][0][0][7] = 56, - [0][0][0][0][0][8] = 56, - [0][0][0][0][0][9] = 56, - [0][0][0][0][0][10] = 56, - [0][0][0][0][0][11] = 56, - [0][0][0][0][0][12] = 48, - [0][0][0][0][0][13] = 76, - [0][1][0][0][0][0] = 44, - [0][1][0][0][0][1] = 44, - [0][1][0][0][0][2] = 44, - [0][1][0][0][0][3] = 44, - [0][1][0][0][0][4] = 44, - [0][1][0][0][0][5] = 44, - [0][1][0][0][0][6] = 44, - [0][1][0][0][0][7] = 44, - [0][1][0][0][0][8] = 44, - [0][1][0][0][0][9] = 44, - [0][1][0][0][0][10] = 44, - [0][1][0][0][0][11] = 44, - [0][1][0][0][0][12] = 38, - [0][1][0][0][0][13] = 64, - [1][0][0][0][0][0] = 0, - [1][0][0][0][0][1] = 0, - [1][0][0][0][0][2] = 58, - [1][0][0][0][0][3] = 58, - [1][0][0][0][0][4] = 58, - [1][0][0][0][0][5] = 58, - [1][0][0][0][0][6] = 46, - [1][0][0][0][0][7] = 46, - [1][0][0][0][0][8] = 46, - [1][0][0][0][0][9] = 32, - [1][0][0][0][0][10] = 32, - [1][0][0][0][0][11] = 0, - [1][0][0][0][0][12] = 0, - [1][0][0][0][0][13] = 0, - [1][1][0][0][0][0] = 0, - [1][1][0][0][0][1] = 0, - [1][1][0][0][0][2] = 46, - [1][1][0][0][0][3] = 46, - [1][1][0][0][0][4] = 46, - [1][1][0][0][0][5] = 46, - [1][1][0][0][0][6] = 46, - [1][1][0][0][0][7] = 46, - [1][1][0][0][0][8] = 46, - [1][1][0][0][0][9] = 24, - [1][1][0][0][0][10] = 24, - [1][1][0][0][0][11] = 0, - [1][1][0][0][0][12] = 0, - [1][1][0][0][0][13] = 0, - [0][0][1][0][0][0] = 58, - [0][0][1][0][0][1] = 58, - [0][0][1][0][0][2] = 58, - [0][0][1][0][0][3] = 58, - [0][0][1][0][0][4] = 58, - [0][0][1][0][0][5] = 58, - [0][0][1][0][0][6] = 58, - [0][0][1][0][0][7] = 58, - [0][0][1][0][0][8] = 58, - [0][0][1][0][0][9] = 58, - [0][0][1][0][0][10] = 58, - [0][0][1][0][0][11] = 56, - [0][0][1][0][0][12] = 52, - [0][0][1][0][0][13] = 0, - [0][1][1][0][0][0] = 46, - [0][1][1][0][0][1] = 46, - [0][1][1][0][0][2] = 46, - [0][1][1][0][0][3] = 46, - [0][1][1][0][0][4] = 46, - [0][1][1][0][0][5] = 46, - [0][1][1][0][0][6] = 46, - [0][1][1][0][0][7] = 46, - [0][1][1][0][0][8] = 46, - [0][1][1][0][0][9] = 46, - [0][1][1][0][0][10] = 46, - [0][1][1][0][0][11] = 42, - 
[0][1][1][0][0][12] = 40, - [0][1][1][0][0][13] = 0, - [0][0][2][0][0][0] = 58, - [0][0][2][0][0][1] = 58, - [0][0][2][0][0][2] = 58, - [0][0][2][0][0][3] = 58, - [0][0][2][0][0][4] = 58, - [0][0][2][0][0][5] = 58, - [0][0][2][0][0][6] = 58, - [0][0][2][0][0][7] = 58, - [0][0][2][0][0][8] = 58, - [0][0][2][0][0][9] = 58, - [0][0][2][0][0][10] = 58, - [0][0][2][0][0][11] = 54, - [0][0][2][0][0][12] = 50, - [0][0][2][0][0][13] = 0, - [0][1][2][0][0][0] = 46, - [0][1][2][0][0][1] = 46, - [0][1][2][0][0][2] = 46, - [0][1][2][0][0][3] = 46, - [0][1][2][0][0][4] = 46, - [0][1][2][0][0][5] = 46, - [0][1][2][0][0][6] = 46, - [0][1][2][0][0][7] = 46, - [0][1][2][0][0][8] = 46, - [0][1][2][0][0][9] = 46, - [0][1][2][0][0][10] = 46, - [0][1][2][0][0][11] = 42, - [0][1][2][0][0][12] = 40, - [0][1][2][0][0][13] = 0, - [0][1][2][1][0][0] = 34, - [0][1][2][1][0][1] = 34, - [0][1][2][1][0][2] = 34, - [0][1][2][1][0][3] = 34, - [0][1][2][1][0][4] = 34, - [0][1][2][1][0][5] = 34, - [0][1][2][1][0][6] = 34, - [0][1][2][1][0][7] = 34, - [0][1][2][1][0][8] = 34, - [0][1][2][1][0][9] = 34, - [0][1][2][1][0][10] = 34, - [0][1][2][1][0][11] = 34, - [0][1][2][1][0][12] = 34, - [0][1][2][1][0][13] = 0, - [1][0][2][0][0][0] = 0, - [1][0][2][0][0][1] = 0, - [1][0][2][0][0][2] = 56, - [1][0][2][0][0][3] = 56, - [1][0][2][0][0][4] = 58, - [1][0][2][0][0][5] = 58, - [1][0][2][0][0][6] = 54, - [1][0][2][0][0][7] = 50, - [1][0][2][0][0][8] = 50, - [1][0][2][0][0][9] = 42, - [1][0][2][0][0][10] = 40, - [1][0][2][0][0][11] = 0, - [1][0][2][0][0][12] = 0, - [1][0][2][0][0][13] = 0, - [1][1][2][0][0][0] = 0, - [1][1][2][0][0][1] = 0, - [1][1][2][0][0][2] = 46, - [1][1][2][0][0][3] = 46, - [1][1][2][0][0][4] = 46, - [1][1][2][0][0][5] = 46, - [1][1][2][0][0][6] = 46, - [1][1][2][0][0][7] = 46, - [1][1][2][0][0][8] = 46, - [1][1][2][0][0][9] = 38, - [1][1][2][0][0][10] = 36, - [1][1][2][0][0][11] = 0, - [1][1][2][0][0][12] = 0, - [1][1][2][0][0][13] = 0, - [1][1][2][1][0][0] = 0, - [1][1][2][1][0][1] = 0, - [1][1][2][1][0][2] = 34, - [1][1][2][1][0][3] = 34, - [1][1][2][1][0][4] = 34, - [1][1][2][1][0][5] = 34, - [1][1][2][1][0][6] = 34, - [1][1][2][1][0][7] = 34, - [1][1][2][1][0][8] = 34, - [1][1][2][1][0][9] = 34, - [1][1][2][1][0][10] = 34, - [1][1][2][1][0][11] = 0, - [1][1][2][1][0][12] = 0, - [1][1][2][1][0][13] = 0, - [0][0][0][0][2][0] = 76, - [0][0][0][0][1][0] = 56, - [0][0][0][0][3][0] = 68, - [0][0][0][0][5][0] = 76, - [0][0][0][0][6][0] = 56, - [0][0][0][0][9][0] = 56, - [0][0][0][0][8][0] = 60, - [0][0][0][0][11][0] = 56, - [0][0][0][0][2][1] = 76, - [0][0][0][0][1][1] = 56, - [0][0][0][0][3][1] = 68, - [0][0][0][0][5][1] = 76, - [0][0][0][0][6][1] = 56, - [0][0][0][0][9][1] = 56, - [0][0][0][0][8][1] = 60, - [0][0][0][0][11][1] = 56, - [0][0][0][0][2][2] = 76, - [0][0][0][0][1][2] = 56, - [0][0][0][0][3][2] = 68, - [0][0][0][0][5][2] = 76, - [0][0][0][0][6][2] = 56, - [0][0][0][0][9][2] = 56, - [0][0][0][0][8][2] = 60, - [0][0][0][0][11][2] = 56, - [0][0][0][0][2][3] = 76, - [0][0][0][0][1][3] = 56, - [0][0][0][0][3][3] = 68, - [0][0][0][0][5][3] = 76, - [0][0][0][0][6][3] = 56, - [0][0][0][0][9][3] = 56, - [0][0][0][0][8][3] = 60, - [0][0][0][0][11][3] = 56, - [0][0][0][0][2][4] = 76, - [0][0][0][0][1][4] = 56, - [0][0][0][0][3][4] = 68, - [0][0][0][0][5][4] = 76, - [0][0][0][0][6][4] = 56, - [0][0][0][0][9][4] = 56, - [0][0][0][0][8][4] = 60, - [0][0][0][0][11][4] = 56, - [0][0][0][0][2][5] = 76, - [0][0][0][0][1][5] = 56, - [0][0][0][0][3][5] = 68, - [0][0][0][0][5][5] = 76, - [0][0][0][0][6][5] = 56, - 
[0][0][0][0][9][5] = 56, - [0][0][0][0][8][5] = 60, - [0][0][0][0][11][5] = 56, - [0][0][0][0][2][6] = 76, - [0][0][0][0][1][6] = 56, - [0][0][0][0][3][6] = 68, - [0][0][0][0][5][6] = 76, - [0][0][0][0][6][6] = 56, - [0][0][0][0][9][6] = 56, - [0][0][0][0][8][6] = 60, - [0][0][0][0][11][6] = 56, - [0][0][0][0][2][7] = 76, - [0][0][0][0][1][7] = 56, - [0][0][0][0][3][7] = 68, - [0][0][0][0][5][7] = 76, - [0][0][0][0][6][7] = 56, - [0][0][0][0][9][7] = 56, - [0][0][0][0][8][7] = 60, - [0][0][0][0][11][7] = 56, - [0][0][0][0][2][8] = 76, - [0][0][0][0][1][8] = 56, - [0][0][0][0][3][8] = 68, - [0][0][0][0][5][8] = 76, - [0][0][0][0][6][8] = 56, - [0][0][0][0][9][8] = 56, - [0][0][0][0][8][8] = 60, - [0][0][0][0][11][8] = 56, - [0][0][0][0][2][9] = 76, - [0][0][0][0][1][9] = 56, - [0][0][0][0][3][9] = 68, - [0][0][0][0][5][9] = 76, - [0][0][0][0][6][9] = 56, - [0][0][0][0][9][9] = 56, - [0][0][0][0][8][9] = 60, - [0][0][0][0][11][9] = 56, - [0][0][0][0][2][10] = 76, - [0][0][0][0][1][10] = 56, - [0][0][0][0][3][10] = 68, - [0][0][0][0][5][10] = 76, - [0][0][0][0][6][10] = 56, - [0][0][0][0][9][10] = 56, - [0][0][0][0][8][10] = 60, - [0][0][0][0][11][10] = 56, - [0][0][0][0][2][11] = 68, - [0][0][0][0][1][11] = 56, - [0][0][0][0][3][11] = 68, - [0][0][0][0][5][11] = 68, - [0][0][0][0][6][11] = 56, - [0][0][0][0][9][11] = 56, - [0][0][0][0][8][11] = 60, - [0][0][0][0][11][11] = 56, - [0][0][0][0][2][12] = 48, - [0][0][0][0][1][12] = 56, - [0][0][0][0][3][12] = 68, - [0][0][0][0][5][12] = 48, - [0][0][0][0][6][12] = 56, - [0][0][0][0][9][12] = 56, - [0][0][0][0][8][12] = 60, - [0][0][0][0][11][12] = 56, - [0][0][0][0][2][13] = 127, - [0][0][0][0][1][13] = 127, - [0][0][0][0][3][13] = 76, - [0][0][0][0][5][13] = 127, - [0][0][0][0][6][13] = 127, - [0][0][0][0][9][13] = 127, - [0][0][0][0][8][13] = 127, - [0][0][0][0][11][13] = 127, - [0][1][0][0][2][0] = 74, - [0][1][0][0][1][0] = 44, - [0][1][0][0][3][0] = 56, - [0][1][0][0][5][0] = 74, - [0][1][0][0][6][0] = 44, - [0][1][0][0][9][0] = 44, - [0][1][0][0][8][0] = 48, - [0][1][0][0][11][0] = 44, - [0][1][0][0][2][1] = 76, - [0][1][0][0][1][1] = 44, - [0][1][0][0][3][1] = 56, - [0][1][0][0][5][1] = 76, - [0][1][0][0][6][1] = 44, - [0][1][0][0][9][1] = 44, - [0][1][0][0][8][1] = 48, - [0][1][0][0][11][1] = 44, - [0][1][0][0][2][2] = 76, - [0][1][0][0][1][2] = 44, - [0][1][0][0][3][2] = 56, - [0][1][0][0][5][2] = 76, - [0][1][0][0][6][2] = 44, - [0][1][0][0][9][2] = 44, - [0][1][0][0][8][2] = 48, - [0][1][0][0][11][2] = 44, - [0][1][0][0][2][3] = 76, - [0][1][0][0][1][3] = 44, - [0][1][0][0][3][3] = 56, - [0][1][0][0][5][3] = 76, - [0][1][0][0][6][3] = 44, - [0][1][0][0][9][3] = 44, - [0][1][0][0][8][3] = 48, - [0][1][0][0][11][3] = 44, - [0][1][0][0][2][4] = 76, - [0][1][0][0][1][4] = 44, - [0][1][0][0][3][4] = 56, - [0][1][0][0][5][4] = 76, - [0][1][0][0][6][4] = 44, - [0][1][0][0][9][4] = 44, - [0][1][0][0][8][4] = 48, - [0][1][0][0][11][4] = 44, - [0][1][0][0][2][5] = 76, - [0][1][0][0][1][5] = 44, - [0][1][0][0][3][5] = 56, - [0][1][0][0][5][5] = 76, - [0][1][0][0][6][5] = 44, - [0][1][0][0][9][5] = 44, - [0][1][0][0][8][5] = 48, - [0][1][0][0][11][5] = 44, - [0][1][0][0][2][6] = 76, - [0][1][0][0][1][6] = 44, - [0][1][0][0][3][6] = 56, - [0][1][0][0][5][6] = 76, - [0][1][0][0][6][6] = 44, - [0][1][0][0][9][6] = 44, - [0][1][0][0][8][6] = 48, - [0][1][0][0][11][6] = 44, - [0][1][0][0][2][7] = 76, - [0][1][0][0][1][7] = 44, - [0][1][0][0][3][7] = 56, - [0][1][0][0][5][7] = 76, - [0][1][0][0][6][7] = 44, - [0][1][0][0][9][7] = 44, - 
[0][1][0][0][8][7] = 48, - [0][1][0][0][11][7] = 44, - [0][1][0][0][2][8] = 76, - [0][1][0][0][1][8] = 44, - [0][1][0][0][3][8] = 56, - [0][1][0][0][5][8] = 76, - [0][1][0][0][6][8] = 44, - [0][1][0][0][9][8] = 44, - [0][1][0][0][8][8] = 48, - [0][1][0][0][11][8] = 44, - [0][1][0][0][2][9] = 76, - [0][1][0][0][1][9] = 44, - [0][1][0][0][3][9] = 56, - [0][1][0][0][5][9] = 76, - [0][1][0][0][6][9] = 44, - [0][1][0][0][9][9] = 44, - [0][1][0][0][8][9] = 48, - [0][1][0][0][11][9] = 44, - [0][1][0][0][2][10] = 62, - [0][1][0][0][1][10] = 44, - [0][1][0][0][3][10] = 56, - [0][1][0][0][5][10] = 62, - [0][1][0][0][6][10] = 44, - [0][1][0][0][9][10] = 44, - [0][1][0][0][8][10] = 48, - [0][1][0][0][11][10] = 44, - [0][1][0][0][2][11] = 52, - [0][1][0][0][1][11] = 44, - [0][1][0][0][3][11] = 56, - [0][1][0][0][5][11] = 52, - [0][1][0][0][6][11] = 44, - [0][1][0][0][9][11] = 44, - [0][1][0][0][8][11] = 48, - [0][1][0][0][11][11] = 44, - [0][1][0][0][2][12] = 38, - [0][1][0][0][1][12] = 44, - [0][1][0][0][3][12] = 56, - [0][1][0][0][5][12] = 38, - [0][1][0][0][6][12] = 44, - [0][1][0][0][9][12] = 44, - [0][1][0][0][8][12] = 48, - [0][1][0][0][11][12] = 44, - [0][1][0][0][2][13] = 127, - [0][1][0][0][1][13] = 127, - [0][1][0][0][3][13] = 64, - [0][1][0][0][5][13] = 127, - [0][1][0][0][6][13] = 127, - [0][1][0][0][9][13] = 127, - [0][1][0][0][8][13] = 127, - [0][1][0][0][11][13] = 127, - [1][0][0][0][2][0] = 127, - [1][0][0][0][1][0] = 127, - [1][0][0][0][3][0] = 127, - [1][0][0][0][5][0] = 127, - [1][0][0][0][6][0] = 127, - [1][0][0][0][9][0] = 127, - [1][0][0][0][8][0] = 127, - [1][0][0][0][11][0] = 127, - [1][0][0][0][2][1] = 127, - [1][0][0][0][1][1] = 127, - [1][0][0][0][3][1] = 127, - [1][0][0][0][5][1] = 127, - [1][0][0][0][6][1] = 127, - [1][0][0][0][9][1] = 127, - [1][0][0][0][8][1] = 127, - [1][0][0][0][11][1] = 127, - [1][0][0][0][2][2] = 60, - [1][0][0][0][1][2] = 58, - [1][0][0][0][3][2] = 68, - [1][0][0][0][5][2] = 60, - [1][0][0][0][6][2] = 58, - [1][0][0][0][9][2] = 58, - [1][0][0][0][8][2] = 60, - [1][0][0][0][11][2] = 58, - [1][0][0][0][2][3] = 60, - [1][0][0][0][1][3] = 58, - [1][0][0][0][3][3] = 68, - [1][0][0][0][5][3] = 60, - [1][0][0][0][6][3] = 58, - [1][0][0][0][9][3] = 58, - [1][0][0][0][8][3] = 60, - [1][0][0][0][11][3] = 58, - [1][0][0][0][2][4] = 60, - [1][0][0][0][1][4] = 58, - [1][0][0][0][3][4] = 68, - [1][0][0][0][5][4] = 60, - [1][0][0][0][6][4] = 58, - [1][0][0][0][9][4] = 58, - [1][0][0][0][8][4] = 60, - [1][0][0][0][11][4] = 58, - [1][0][0][0][2][5] = 60, - [1][0][0][0][1][5] = 58, - [1][0][0][0][3][5] = 68, - [1][0][0][0][5][5] = 60, - [1][0][0][0][6][5] = 58, - [1][0][0][0][9][5] = 58, - [1][0][0][0][8][5] = 60, - [1][0][0][0][11][5] = 58, - [1][0][0][0][2][6] = 46, - [1][0][0][0][1][6] = 58, - [1][0][0][0][3][6] = 68, - [1][0][0][0][5][6] = 46, - [1][0][0][0][6][6] = 58, - [1][0][0][0][9][6] = 58, - [1][0][0][0][8][6] = 60, - [1][0][0][0][11][6] = 58, - [1][0][0][0][2][7] = 46, - [1][0][0][0][1][7] = 58, - [1][0][0][0][3][7] = 68, - [1][0][0][0][5][7] = 46, - [1][0][0][0][6][7] = 58, - [1][0][0][0][9][7] = 58, - [1][0][0][0][8][7] = 60, - [1][0][0][0][11][7] = 58, - [1][0][0][0][2][8] = 46, - [1][0][0][0][1][8] = 58, - [1][0][0][0][3][8] = 68, - [1][0][0][0][5][8] = 46, - [1][0][0][0][6][8] = 58, - [1][0][0][0][9][8] = 58, - [1][0][0][0][8][8] = 60, - [1][0][0][0][11][8] = 58, - [1][0][0][0][2][9] = 32, - [1][0][0][0][1][9] = 58, - [1][0][0][0][3][9] = 68, - [1][0][0][0][5][9] = 32, - [1][0][0][0][6][9] = 58, - [1][0][0][0][9][9] = 58, - [1][0][0][0][8][9] = 60, - 
[1][0][0][0][11][9] = 58, - [1][0][0][0][2][10] = 32, - [1][0][0][0][1][10] = 58, - [1][0][0][0][3][10] = 68, - [1][0][0][0][5][10] = 32, - [1][0][0][0][6][10] = 58, - [1][0][0][0][9][10] = 58, - [1][0][0][0][8][10] = 60, - [1][0][0][0][11][10] = 58, - [1][0][0][0][2][11] = 127, - [1][0][0][0][1][11] = 127, - [1][0][0][0][3][11] = 127, - [1][0][0][0][5][11] = 127, - [1][0][0][0][6][11] = 127, - [1][0][0][0][9][11] = 127, - [1][0][0][0][8][11] = 127, - [1][0][0][0][11][11] = 127, - [1][0][0][0][2][12] = 127, - [1][0][0][0][1][12] = 127, - [1][0][0][0][3][12] = 127, - [1][0][0][0][5][12] = 127, - [1][0][0][0][6][12] = 127, - [1][0][0][0][9][12] = 127, - [1][0][0][0][8][12] = 127, - [1][0][0][0][11][12] = 127, - [1][0][0][0][2][13] = 127, - [1][0][0][0][1][13] = 127, - [1][0][0][0][3][13] = 127, - [1][0][0][0][5][13] = 127, - [1][0][0][0][6][13] = 127, - [1][0][0][0][9][13] = 127, - [1][0][0][0][8][13] = 127, - [1][0][0][0][11][13] = 127, - [1][1][0][0][2][0] = 127, - [1][1][0][0][1][0] = 127, - [1][1][0][0][3][0] = 127, - [1][1][0][0][5][0] = 127, - [1][1][0][0][6][0] = 127, - [1][1][0][0][9][0] = 127, - [1][1][0][0][8][0] = 127, - [1][1][0][0][11][0] = 127, - [1][1][0][0][2][1] = 127, - [1][1][0][0][1][1] = 127, - [1][1][0][0][3][1] = 127, - [1][1][0][0][5][1] = 127, - [1][1][0][0][6][1] = 127, - [1][1][0][0][9][1] = 127, - [1][1][0][0][8][1] = 127, - [1][1][0][0][11][1] = 127, - [1][1][0][0][2][2] = 48, - [1][1][0][0][1][2] = 46, - [1][1][0][0][3][2] = 56, - [1][1][0][0][5][2] = 48, - [1][1][0][0][6][2] = 46, - [1][1][0][0][9][2] = 46, - [1][1][0][0][8][2] = 48, - [1][1][0][0][11][2] = 46, - [1][1][0][0][2][3] = 48, - [1][1][0][0][1][3] = 46, - [1][1][0][0][3][3] = 56, - [1][1][0][0][5][3] = 48, - [1][1][0][0][6][3] = 46, - [1][1][0][0][9][3] = 46, - [1][1][0][0][8][3] = 48, - [1][1][0][0][11][3] = 46, - [1][1][0][0][2][4] = 48, - [1][1][0][0][1][4] = 46, - [1][1][0][0][3][4] = 56, - [1][1][0][0][5][4] = 48, - [1][1][0][0][6][4] = 46, - [1][1][0][0][9][4] = 46, - [1][1][0][0][8][4] = 48, - [1][1][0][0][11][4] = 46, - [1][1][0][0][2][5] = 58, - [1][1][0][0][1][5] = 46, - [1][1][0][0][3][5] = 56, - [1][1][0][0][5][5] = 58, - [1][1][0][0][6][5] = 46, - [1][1][0][0][9][5] = 46, - [1][1][0][0][8][5] = 48, - [1][1][0][0][11][5] = 46, - [1][1][0][0][2][6] = 46, - [1][1][0][0][1][6] = 46, - [1][1][0][0][3][6] = 56, - [1][1][0][0][5][6] = 46, - [1][1][0][0][6][6] = 46, - [1][1][0][0][9][6] = 46, - [1][1][0][0][8][6] = 48, - [1][1][0][0][11][6] = 46, - [1][1][0][0][2][7] = 46, - [1][1][0][0][1][7] = 46, - [1][1][0][0][3][7] = 56, - [1][1][0][0][5][7] = 46, - [1][1][0][0][6][7] = 46, - [1][1][0][0][9][7] = 46, - [1][1][0][0][8][7] = 48, - [1][1][0][0][11][7] = 46, - [1][1][0][0][2][8] = 46, - [1][1][0][0][1][8] = 46, - [1][1][0][0][3][8] = 56, - [1][1][0][0][5][8] = 46, - [1][1][0][0][6][8] = 46, - [1][1][0][0][9][8] = 46, - [1][1][0][0][8][8] = 48, - [1][1][0][0][11][8] = 46, - [1][1][0][0][2][9] = 24, - [1][1][0][0][1][9] = 46, - [1][1][0][0][3][9] = 56, - [1][1][0][0][5][9] = 24, - [1][1][0][0][6][9] = 46, - [1][1][0][0][9][9] = 46, - [1][1][0][0][8][9] = 48, - [1][1][0][0][11][9] = 46, - [1][1][0][0][2][10] = 24, - [1][1][0][0][1][10] = 46, - [1][1][0][0][3][10] = 56, - [1][1][0][0][5][10] = 24, - [1][1][0][0][6][10] = 46, - [1][1][0][0][9][10] = 46, - [1][1][0][0][8][10] = 48, - [1][1][0][0][11][10] = 46, - [1][1][0][0][2][11] = 127, - [1][1][0][0][1][11] = 127, - [1][1][0][0][3][11] = 127, - [1][1][0][0][5][11] = 127, - [1][1][0][0][6][11] = 127, - [1][1][0][0][9][11] = 127, - 
[1][1][0][0][8][11] = 127, - [1][1][0][0][11][11] = 127, - [1][1][0][0][2][12] = 127, - [1][1][0][0][1][12] = 127, - [1][1][0][0][3][12] = 127, - [1][1][0][0][5][12] = 127, - [1][1][0][0][6][12] = 127, - [1][1][0][0][9][12] = 127, - [1][1][0][0][8][12] = 127, - [1][1][0][0][11][12] = 127, - [1][1][0][0][2][13] = 127, - [1][1][0][0][1][13] = 127, - [1][1][0][0][3][13] = 127, - [1][1][0][0][5][13] = 127, - [1][1][0][0][6][13] = 127, - [1][1][0][0][9][13] = 127, - [1][1][0][0][8][13] = 127, - [1][1][0][0][11][13] = 127, - [0][0][1][0][2][0] = 66, - [0][0][1][0][1][0] = 58, - [0][0][1][0][3][0] = 76, - [0][0][1][0][5][0] = 66, - [0][0][1][0][6][0] = 58, - [0][0][1][0][9][0] = 58, - [0][0][1][0][8][0] = 60, - [0][0][1][0][11][0] = 58, - [0][0][1][0][2][1] = 66, - [0][0][1][0][1][1] = 58, - [0][0][1][0][3][1] = 76, - [0][0][1][0][5][1] = 66, - [0][0][1][0][6][1] = 58, - [0][0][1][0][9][1] = 58, - [0][0][1][0][8][1] = 60, - [0][0][1][0][11][1] = 58, - [0][0][1][0][2][2] = 70, - [0][0][1][0][1][2] = 58, - [0][0][1][0][3][2] = 76, - [0][0][1][0][5][2] = 70, - [0][0][1][0][6][2] = 58, - [0][0][1][0][9][2] = 58, - [0][0][1][0][8][2] = 60, - [0][0][1][0][11][2] = 58, - [0][0][1][0][2][3] = 74, - [0][0][1][0][1][3] = 58, - [0][0][1][0][3][3] = 76, - [0][0][1][0][5][3] = 74, - [0][0][1][0][6][3] = 58, - [0][0][1][0][9][3] = 58, - [0][0][1][0][8][3] = 60, - [0][0][1][0][11][3] = 58, - [0][0][1][0][2][4] = 78, - [0][0][1][0][1][4] = 58, - [0][0][1][0][3][4] = 76, - [0][0][1][0][5][4] = 78, - [0][0][1][0][6][4] = 58, - [0][0][1][0][9][4] = 58, - [0][0][1][0][8][4] = 60, - [0][0][1][0][11][4] = 58, - [0][0][1][0][2][5] = 78, - [0][0][1][0][1][5] = 58, - [0][0][1][0][3][5] = 76, - [0][0][1][0][5][5] = 78, - [0][0][1][0][6][5] = 58, - [0][0][1][0][9][5] = 58, - [0][0][1][0][8][5] = 60, - [0][0][1][0][11][5] = 58, - [0][0][1][0][2][6] = 78, - [0][0][1][0][1][6] = 58, - [0][0][1][0][3][6] = 76, - [0][0][1][0][5][6] = 78, - [0][0][1][0][6][6] = 58, - [0][0][1][0][9][6] = 58, - [0][0][1][0][8][6] = 60, - [0][0][1][0][11][6] = 58, - [0][0][1][0][2][7] = 74, - [0][0][1][0][1][7] = 58, - [0][0][1][0][3][7] = 76, - [0][0][1][0][5][7] = 74, - [0][0][1][0][6][7] = 58, - [0][0][1][0][9][7] = 58, - [0][0][1][0][8][7] = 60, - [0][0][1][0][11][7] = 58, - [0][0][1][0][2][8] = 70, - [0][0][1][0][1][8] = 58, - [0][0][1][0][3][8] = 76, - [0][0][1][0][5][8] = 70, - [0][0][1][0][6][8] = 58, - [0][0][1][0][9][8] = 58, - [0][0][1][0][8][8] = 60, - [0][0][1][0][11][8] = 58, - [0][0][1][0][2][9] = 66, - [0][0][1][0][1][9] = 58, - [0][0][1][0][3][9] = 76, - [0][0][1][0][5][9] = 66, - [0][0][1][0][6][9] = 58, - [0][0][1][0][9][9] = 58, - [0][0][1][0][8][9] = 60, - [0][0][1][0][11][9] = 58, - [0][0][1][0][2][10] = 66, - [0][0][1][0][1][10] = 58, - [0][0][1][0][3][10] = 76, - [0][0][1][0][5][10] = 66, - [0][0][1][0][6][10] = 58, - [0][0][1][0][9][10] = 58, - [0][0][1][0][8][10] = 60, - [0][0][1][0][11][10] = 58, - [0][0][1][0][2][11] = 56, - [0][0][1][0][1][11] = 58, - [0][0][1][0][3][11] = 76, - [0][0][1][0][5][11] = 56, - [0][0][1][0][6][11] = 58, - [0][0][1][0][9][11] = 58, - [0][0][1][0][8][11] = 60, - [0][0][1][0][11][11] = 58, - [0][0][1][0][2][12] = 52, - [0][0][1][0][1][12] = 58, - [0][0][1][0][3][12] = 76, - [0][0][1][0][5][12] = 52, - [0][0][1][0][6][12] = 58, - [0][0][1][0][9][12] = 58, - [0][0][1][0][8][12] = 60, - [0][0][1][0][11][12] = 58, - [0][0][1][0][2][13] = 127, - [0][0][1][0][1][13] = 127, - [0][0][1][0][3][13] = 127, - [0][0][1][0][5][13] = 127, - [0][0][1][0][6][13] = 127, - [0][0][1][0][9][13] = 127, - 
[0][0][1][0][8][13] = 127, - [0][0][1][0][11][13] = 127, - [0][1][1][0][2][0] = 62, - [0][1][1][0][1][0] = 46, - [0][1][1][0][3][0] = 64, - [0][1][1][0][5][0] = 62, - [0][1][1][0][6][0] = 46, - [0][1][1][0][9][0] = 46, - [0][1][1][0][8][0] = 48, - [0][1][1][0][11][0] = 46, - [0][1][1][0][2][1] = 62, - [0][1][1][0][1][1] = 46, - [0][1][1][0][3][1] = 64, - [0][1][1][0][5][1] = 62, - [0][1][1][0][6][1] = 46, - [0][1][1][0][9][1] = 46, - [0][1][1][0][8][1] = 48, - [0][1][1][0][11][1] = 46, - [0][1][1][0][2][2] = 66, - [0][1][1][0][1][2] = 46, - [0][1][1][0][3][2] = 64, - [0][1][1][0][5][2] = 66, - [0][1][1][0][6][2] = 46, - [0][1][1][0][9][2] = 46, - [0][1][1][0][8][2] = 48, - [0][1][1][0][11][2] = 46, - [0][1][1][0][2][3] = 70, - [0][1][1][0][1][3] = 46, - [0][1][1][0][3][3] = 64, - [0][1][1][0][5][3] = 70, - [0][1][1][0][6][3] = 46, - [0][1][1][0][9][3] = 46, - [0][1][1][0][8][3] = 48, - [0][1][1][0][11][3] = 46, - [0][1][1][0][2][4] = 78, - [0][1][1][0][1][4] = 46, - [0][1][1][0][3][4] = 64, - [0][1][1][0][5][4] = 78, - [0][1][1][0][6][4] = 46, - [0][1][1][0][9][4] = 46, - [0][1][1][0][8][4] = 48, - [0][1][1][0][11][4] = 46, - [0][1][1][0][2][5] = 78, - [0][1][1][0][1][5] = 46, - [0][1][1][0][3][5] = 64, - [0][1][1][0][5][5] = 78, - [0][1][1][0][6][5] = 46, - [0][1][1][0][9][5] = 46, - [0][1][1][0][8][5] = 48, - [0][1][1][0][11][5] = 46, - [0][1][1][0][2][6] = 78, - [0][1][1][0][1][6] = 46, - [0][1][1][0][3][6] = 64, - [0][1][1][0][5][6] = 78, - [0][1][1][0][6][6] = 46, - [0][1][1][0][9][6] = 46, - [0][1][1][0][8][6] = 48, - [0][1][1][0][11][6] = 46, - [0][1][1][0][2][7] = 70, - [0][1][1][0][1][7] = 46, - [0][1][1][0][3][7] = 64, - [0][1][1][0][5][7] = 70, - [0][1][1][0][6][7] = 46, - [0][1][1][0][9][7] = 46, - [0][1][1][0][8][7] = 48, - [0][1][1][0][11][7] = 46, - [0][1][1][0][2][8] = 66, - [0][1][1][0][1][8] = 46, - [0][1][1][0][3][8] = 64, - [0][1][1][0][5][8] = 66, - [0][1][1][0][6][8] = 46, - [0][1][1][0][9][8] = 46, - [0][1][1][0][8][8] = 48, - [0][1][1][0][11][8] = 46, - [0][1][1][0][2][9] = 62, - [0][1][1][0][1][9] = 46, - [0][1][1][0][3][9] = 64, - [0][1][1][0][5][9] = 62, - [0][1][1][0][6][9] = 46, - [0][1][1][0][9][9] = 46, - [0][1][1][0][8][9] = 48, - [0][1][1][0][11][9] = 46, - [0][1][1][0][2][10] = 62, - [0][1][1][0][1][10] = 46, - [0][1][1][0][3][10] = 64, - [0][1][1][0][5][10] = 62, - [0][1][1][0][6][10] = 46, - [0][1][1][0][9][10] = 46, - [0][1][1][0][8][10] = 48, - [0][1][1][0][11][10] = 46, - [0][1][1][0][2][11] = 42, - [0][1][1][0][1][11] = 46, - [0][1][1][0][3][11] = 64, - [0][1][1][0][5][11] = 42, - [0][1][1][0][6][11] = 46, - [0][1][1][0][9][11] = 46, - [0][1][1][0][8][11] = 48, - [0][1][1][0][11][11] = 46, - [0][1][1][0][2][12] = 40, - [0][1][1][0][1][12] = 46, - [0][1][1][0][3][12] = 64, - [0][1][1][0][5][12] = 40, - [0][1][1][0][6][12] = 46, - [0][1][1][0][9][12] = 46, - [0][1][1][0][8][12] = 48, - [0][1][1][0][11][12] = 46, - [0][1][1][0][2][13] = 127, - [0][1][1][0][1][13] = 127, - [0][1][1][0][3][13] = 127, - [0][1][1][0][5][13] = 127, - [0][1][1][0][6][13] = 127, - [0][1][1][0][9][13] = 127, - [0][1][1][0][8][13] = 127, - [0][1][1][0][11][13] = 127, - [0][0][2][0][2][0] = 66, - [0][0][2][0][1][0] = 58, - [0][0][2][0][3][0] = 76, - [0][0][2][0][5][0] = 66, - [0][0][2][0][6][0] = 58, - [0][0][2][0][9][0] = 58, - [0][0][2][0][8][0] = 60, - [0][0][2][0][11][0] = 58, - [0][0][2][0][2][1] = 66, - [0][0][2][0][1][1] = 58, - [0][0][2][0][3][1] = 76, - [0][0][2][0][5][1] = 66, - [0][0][2][0][6][1] = 58, - [0][0][2][0][9][1] = 58, - [0][0][2][0][8][1] = 60, - 
[0][0][2][0][11][1] = 58, - [0][0][2][0][2][2] = 70, - [0][0][2][0][1][2] = 58, - [0][0][2][0][3][2] = 76, - [0][0][2][0][5][2] = 70, - [0][0][2][0][6][2] = 58, - [0][0][2][0][9][2] = 58, - [0][0][2][0][8][2] = 60, - [0][0][2][0][11][2] = 58, - [0][0][2][0][2][3] = 74, - [0][0][2][0][1][3] = 58, - [0][0][2][0][3][3] = 76, - [0][0][2][0][5][3] = 74, - [0][0][2][0][6][3] = 58, - [0][0][2][0][9][3] = 58, - [0][0][2][0][8][3] = 60, - [0][0][2][0][11][3] = 58, - [0][0][2][0][2][4] = 76, - [0][0][2][0][1][4] = 58, - [0][0][2][0][3][4] = 76, - [0][0][2][0][5][4] = 76, - [0][0][2][0][6][4] = 58, - [0][0][2][0][9][4] = 58, - [0][0][2][0][8][4] = 60, - [0][0][2][0][11][4] = 58, - [0][0][2][0][2][5] = 76, - [0][0][2][0][1][5] = 58, - [0][0][2][0][3][5] = 76, - [0][0][2][0][5][5] = 76, - [0][0][2][0][6][5] = 58, - [0][0][2][0][9][5] = 58, - [0][0][2][0][8][5] = 60, - [0][0][2][0][11][5] = 58, - [0][0][2][0][2][6] = 76, - [0][0][2][0][1][6] = 58, - [0][0][2][0][3][6] = 76, - [0][0][2][0][5][6] = 76, - [0][0][2][0][6][6] = 58, - [0][0][2][0][9][6] = 58, - [0][0][2][0][8][6] = 60, - [0][0][2][0][11][6] = 58, - [0][0][2][0][2][7] = 74, - [0][0][2][0][1][7] = 58, - [0][0][2][0][3][7] = 76, - [0][0][2][0][5][7] = 74, - [0][0][2][0][6][7] = 58, - [0][0][2][0][9][7] = 58, - [0][0][2][0][8][7] = 60, - [0][0][2][0][11][7] = 58, - [0][0][2][0][2][8] = 70, - [0][0][2][0][1][8] = 58, - [0][0][2][0][3][8] = 76, - [0][0][2][0][5][8] = 70, - [0][0][2][0][6][8] = 58, - [0][0][2][0][9][8] = 58, - [0][0][2][0][8][8] = 60, - [0][0][2][0][11][8] = 58, - [0][0][2][0][2][9] = 66, - [0][0][2][0][1][9] = 58, - [0][0][2][0][3][9] = 76, - [0][0][2][0][5][9] = 66, - [0][0][2][0][6][9] = 58, - [0][0][2][0][9][9] = 58, - [0][0][2][0][8][9] = 60, - [0][0][2][0][11][9] = 58, - [0][0][2][0][2][10] = 66, - [0][0][2][0][1][10] = 58, - [0][0][2][0][3][10] = 76, - [0][0][2][0][5][10] = 66, - [0][0][2][0][6][10] = 58, - [0][0][2][0][9][10] = 58, - [0][0][2][0][8][10] = 60, - [0][0][2][0][11][10] = 58, - [0][0][2][0][2][11] = 54, - [0][0][2][0][1][11] = 58, - [0][0][2][0][3][11] = 76, - [0][0][2][0][5][11] = 54, - [0][0][2][0][6][11] = 58, - [0][0][2][0][9][11] = 58, - [0][0][2][0][8][11] = 60, - [0][0][2][0][11][11] = 58, - [0][0][2][0][2][12] = 50, - [0][0][2][0][1][12] = 58, - [0][0][2][0][3][12] = 76, - [0][0][2][0][5][12] = 50, - [0][0][2][0][6][12] = 58, - [0][0][2][0][9][12] = 58, - [0][0][2][0][8][12] = 60, - [0][0][2][0][11][12] = 58, - [0][0][2][0][2][13] = 127, - [0][0][2][0][1][13] = 127, - [0][0][2][0][3][13] = 127, - [0][0][2][0][5][13] = 127, - [0][0][2][0][6][13] = 127, - [0][0][2][0][9][13] = 127, - [0][0][2][0][8][13] = 127, - [0][0][2][0][11][13] = 127, - [0][1][2][0][2][0] = 62, - [0][1][2][0][1][0] = 46, - [0][1][2][0][3][0] = 64, - [0][1][2][0][5][0] = 62, - [0][1][2][0][6][0] = 46, - [0][1][2][0][9][0] = 46, - [0][1][2][0][8][0] = 48, - [0][1][2][0][11][0] = 46, - [0][1][2][0][2][1] = 62, - [0][1][2][0][1][1] = 46, - [0][1][2][0][3][1] = 64, - [0][1][2][0][5][1] = 62, - [0][1][2][0][6][1] = 46, - [0][1][2][0][9][1] = 46, - [0][1][2][0][8][1] = 48, - [0][1][2][0][11][1] = 46, - [0][1][2][0][2][2] = 66, - [0][1][2][0][1][2] = 46, - [0][1][2][0][3][2] = 64, - [0][1][2][0][5][2] = 66, - [0][1][2][0][6][2] = 46, - [0][1][2][0][9][2] = 46, - [0][1][2][0][8][2] = 48, - [0][1][2][0][11][2] = 46, - [0][1][2][0][2][3] = 70, - [0][1][2][0][1][3] = 46, - [0][1][2][0][3][3] = 64, - [0][1][2][0][5][3] = 70, - [0][1][2][0][6][3] = 46, - [0][1][2][0][9][3] = 46, - [0][1][2][0][8][3] = 48, - [0][1][2][0][11][3] = 46, - 
[0][1][2][0][2][4] = 76, - [0][1][2][0][1][4] = 46, - [0][1][2][0][3][4] = 64, - [0][1][2][0][5][4] = 76, - [0][1][2][0][6][4] = 46, - [0][1][2][0][9][4] = 46, - [0][1][2][0][8][4] = 48, - [0][1][2][0][11][4] = 46, - [0][1][2][0][2][5] = 76, - [0][1][2][0][1][5] = 46, - [0][1][2][0][3][5] = 64, - [0][1][2][0][5][5] = 76, - [0][1][2][0][6][5] = 46, - [0][1][2][0][9][5] = 46, - [0][1][2][0][8][5] = 48, - [0][1][2][0][11][5] = 46, - [0][1][2][0][2][6] = 76, - [0][1][2][0][1][6] = 46, - [0][1][2][0][3][6] = 64, - [0][1][2][0][5][6] = 76, - [0][1][2][0][6][6] = 46, - [0][1][2][0][9][6] = 46, - [0][1][2][0][8][6] = 48, - [0][1][2][0][11][6] = 46, - [0][1][2][0][2][7] = 68, - [0][1][2][0][1][7] = 46, - [0][1][2][0][3][7] = 64, - [0][1][2][0][5][7] = 68, - [0][1][2][0][6][7] = 46, - [0][1][2][0][9][7] = 46, - [0][1][2][0][8][7] = 48, - [0][1][2][0][11][7] = 46, - [0][1][2][0][2][8] = 64, - [0][1][2][0][1][8] = 46, - [0][1][2][0][3][8] = 64, - [0][1][2][0][5][8] = 64, - [0][1][2][0][6][8] = 46, - [0][1][2][0][9][8] = 46, - [0][1][2][0][8][8] = 48, - [0][1][2][0][11][8] = 46, - [0][1][2][0][2][9] = 60, - [0][1][2][0][1][9] = 46, - [0][1][2][0][3][9] = 64, - [0][1][2][0][5][9] = 60, - [0][1][2][0][6][9] = 46, - [0][1][2][0][9][9] = 46, - [0][1][2][0][8][9] = 48, - [0][1][2][0][11][9] = 46, - [0][1][2][0][2][10] = 60, - [0][1][2][0][1][10] = 46, - [0][1][2][0][3][10] = 64, - [0][1][2][0][5][10] = 60, - [0][1][2][0][6][10] = 46, - [0][1][2][0][9][10] = 46, - [0][1][2][0][8][10] = 48, - [0][1][2][0][11][10] = 46, - [0][1][2][0][2][11] = 42, - [0][1][2][0][1][11] = 46, - [0][1][2][0][3][11] = 64, - [0][1][2][0][5][11] = 42, - [0][1][2][0][6][11] = 46, - [0][1][2][0][9][11] = 46, - [0][1][2][0][8][11] = 48, - [0][1][2][0][11][11] = 46, - [0][1][2][0][2][12] = 40, - [0][1][2][0][1][12] = 46, - [0][1][2][0][3][12] = 64, - [0][1][2][0][5][12] = 40, - [0][1][2][0][6][12] = 46, - [0][1][2][0][9][12] = 46, - [0][1][2][0][8][12] = 48, - [0][1][2][0][11][12] = 46, - [0][1][2][0][2][13] = 127, - [0][1][2][0][1][13] = 127, - [0][1][2][0][3][13] = 127, - [0][1][2][0][5][13] = 127, - [0][1][2][0][6][13] = 127, - [0][1][2][0][9][13] = 127, - [0][1][2][0][8][13] = 127, - [0][1][2][0][11][13] = 127, - [0][1][2][1][2][0] = 62, - [0][1][2][1][1][0] = 34, - [0][1][2][1][3][0] = 64, - [0][1][2][1][5][0] = 62, - [0][1][2][1][6][0] = 34, - [0][1][2][1][9][0] = 34, - [0][1][2][1][8][0] = 36, - [0][1][2][1][11][0] = 34, - [0][1][2][1][2][1] = 62, - [0][1][2][1][1][1] = 34, - [0][1][2][1][3][1] = 64, - [0][1][2][1][5][1] = 62, - [0][1][2][1][6][1] = 34, - [0][1][2][1][9][1] = 34, - [0][1][2][1][8][1] = 36, - [0][1][2][1][11][1] = 34, - [0][1][2][1][2][2] = 66, - [0][1][2][1][1][2] = 34, - [0][1][2][1][3][2] = 64, - [0][1][2][1][5][2] = 66, - [0][1][2][1][6][2] = 34, - [0][1][2][1][9][2] = 34, - [0][1][2][1][8][2] = 36, - [0][1][2][1][11][2] = 34, - [0][1][2][1][2][3] = 70, - [0][1][2][1][1][3] = 34, - [0][1][2][1][3][3] = 64, - [0][1][2][1][5][3] = 70, - [0][1][2][1][6][3] = 34, - [0][1][2][1][9][3] = 34, - [0][1][2][1][8][3] = 36, - [0][1][2][1][11][3] = 34, - [0][1][2][1][2][4] = 76, - [0][1][2][1][1][4] = 34, - [0][1][2][1][3][4] = 64, - [0][1][2][1][5][4] = 76, - [0][1][2][1][6][4] = 34, - [0][1][2][1][9][4] = 34, - [0][1][2][1][8][4] = 36, - [0][1][2][1][11][4] = 34, - [0][1][2][1][2][5] = 76, - [0][1][2][1][1][5] = 34, - [0][1][2][1][3][5] = 64, - [0][1][2][1][5][5] = 76, - [0][1][2][1][6][5] = 34, - [0][1][2][1][9][5] = 34, - [0][1][2][1][8][5] = 36, - [0][1][2][1][11][5] = 34, - [0][1][2][1][2][6] = 76, - 
[0][1][2][1][1][6] = 34, - [0][1][2][1][3][6] = 64, - [0][1][2][1][5][6] = 76, - [0][1][2][1][6][6] = 34, - [0][1][2][1][9][6] = 34, - [0][1][2][1][8][6] = 36, - [0][1][2][1][11][6] = 34, - [0][1][2][1][2][7] = 68, - [0][1][2][1][1][7] = 34, - [0][1][2][1][3][7] = 64, - [0][1][2][1][5][7] = 68, - [0][1][2][1][6][7] = 34, - [0][1][2][1][9][7] = 34, - [0][1][2][1][8][7] = 36, - [0][1][2][1][11][7] = 34, - [0][1][2][1][2][8] = 64, - [0][1][2][1][1][8] = 34, - [0][1][2][1][3][8] = 64, - [0][1][2][1][5][8] = 64, - [0][1][2][1][6][8] = 34, - [0][1][2][1][9][8] = 34, - [0][1][2][1][8][8] = 36, - [0][1][2][1][11][8] = 34, - [0][1][2][1][2][9] = 60, - [0][1][2][1][1][9] = 34, - [0][1][2][1][3][9] = 64, - [0][1][2][1][5][9] = 60, - [0][1][2][1][6][9] = 34, - [0][1][2][1][9][9] = 34, - [0][1][2][1][8][9] = 36, - [0][1][2][1][11][9] = 34, - [0][1][2][1][2][10] = 60, - [0][1][2][1][1][10] = 34, - [0][1][2][1][3][10] = 64, - [0][1][2][1][5][10] = 60, - [0][1][2][1][6][10] = 34, - [0][1][2][1][9][10] = 34, - [0][1][2][1][8][10] = 36, - [0][1][2][1][11][10] = 34, - [0][1][2][1][2][11] = 42, - [0][1][2][1][1][11] = 34, - [0][1][2][1][3][11] = 64, - [0][1][2][1][5][11] = 42, - [0][1][2][1][6][11] = 34, - [0][1][2][1][9][11] = 34, - [0][1][2][1][8][11] = 36, - [0][1][2][1][11][11] = 34, - [0][1][2][1][2][12] = 40, - [0][1][2][1][1][12] = 34, - [0][1][2][1][3][12] = 64, - [0][1][2][1][5][12] = 40, - [0][1][2][1][6][12] = 34, - [0][1][2][1][9][12] = 34, - [0][1][2][1][8][12] = 36, - [0][1][2][1][11][12] = 34, - [0][1][2][1][2][13] = 127, - [0][1][2][1][1][13] = 127, - [0][1][2][1][3][13] = 127, - [0][1][2][1][5][13] = 127, - [0][1][2][1][6][13] = 127, - [0][1][2][1][9][13] = 127, - [0][1][2][1][8][13] = 127, - [0][1][2][1][11][13] = 127, - [1][0][2][0][2][0] = 127, - [1][0][2][0][1][0] = 127, - [1][0][2][0][3][0] = 127, - [1][0][2][0][5][0] = 127, - [1][0][2][0][6][0] = 127, - [1][0][2][0][9][0] = 127, - [1][0][2][0][8][0] = 127, - [1][0][2][0][11][0] = 127, - [1][0][2][0][2][1] = 127, - [1][0][2][0][1][1] = 127, - [1][0][2][0][3][1] = 127, - [1][0][2][0][5][1] = 127, - [1][0][2][0][6][1] = 127, - [1][0][2][0][9][1] = 127, - [1][0][2][0][8][1] = 127, - [1][0][2][0][11][1] = 127, - [1][0][2][0][2][2] = 56, - [1][0][2][0][1][2] = 58, - [1][0][2][0][3][2] = 76, - [1][0][2][0][5][2] = 56, - [1][0][2][0][6][2] = 58, - [1][0][2][0][9][2] = 58, - [1][0][2][0][8][2] = 60, - [1][0][2][0][11][2] = 58, - [1][0][2][0][2][3] = 56, - [1][0][2][0][1][3] = 58, - [1][0][2][0][3][3] = 76, - [1][0][2][0][5][3] = 56, - [1][0][2][0][6][3] = 58, - [1][0][2][0][9][3] = 58, - [1][0][2][0][8][3] = 60, - [1][0][2][0][11][3] = 58, - [1][0][2][0][2][4] = 60, - [1][0][2][0][1][4] = 58, - [1][0][2][0][3][4] = 76, - [1][0][2][0][5][4] = 60, - [1][0][2][0][6][4] = 58, - [1][0][2][0][9][4] = 58, - [1][0][2][0][8][4] = 60, - [1][0][2][0][11][4] = 58, - [1][0][2][0][2][5] = 64, - [1][0][2][0][1][5] = 58, - [1][0][2][0][3][5] = 76, - [1][0][2][0][5][5] = 64, - [1][0][2][0][6][5] = 58, - [1][0][2][0][9][5] = 58, - [1][0][2][0][8][5] = 60, - [1][0][2][0][11][5] = 58, - [1][0][2][0][2][6] = 54, - [1][0][2][0][1][6] = 58, - [1][0][2][0][3][6] = 76, - [1][0][2][0][5][6] = 54, - [1][0][2][0][6][6] = 58, - [1][0][2][0][9][6] = 58, - [1][0][2][0][8][6] = 60, - [1][0][2][0][11][6] = 58, - [1][0][2][0][2][7] = 50, - [1][0][2][0][1][7] = 58, - [1][0][2][0][3][7] = 76, - [1][0][2][0][5][7] = 50, - [1][0][2][0][6][7] = 58, - [1][0][2][0][9][7] = 58, - [1][0][2][0][8][7] = 60, - [1][0][2][0][11][7] = 58, - [1][0][2][0][2][8] = 50, - [1][0][2][0][1][8] = 58, - 
[1][0][2][0][3][8] = 76, - [1][0][2][0][5][8] = 50, - [1][0][2][0][6][8] = 58, - [1][0][2][0][9][8] = 58, - [1][0][2][0][8][8] = 60, - [1][0][2][0][11][8] = 58, - [1][0][2][0][2][9] = 42, - [1][0][2][0][1][9] = 58, - [1][0][2][0][3][9] = 76, - [1][0][2][0][5][9] = 42, - [1][0][2][0][6][9] = 58, - [1][0][2][0][9][9] = 58, - [1][0][2][0][8][9] = 60, - [1][0][2][0][11][9] = 58, - [1][0][2][0][2][10] = 40, - [1][0][2][0][1][10] = 58, - [1][0][2][0][3][10] = 76, - [1][0][2][0][5][10] = 40, - [1][0][2][0][6][10] = 58, - [1][0][2][0][9][10] = 58, - [1][0][2][0][8][10] = 60, - [1][0][2][0][11][10] = 58, - [1][0][2][0][2][11] = 127, - [1][0][2][0][1][11] = 127, - [1][0][2][0][3][11] = 127, - [1][0][2][0][5][11] = 127, - [1][0][2][0][6][11] = 127, - [1][0][2][0][9][11] = 127, - [1][0][2][0][8][11] = 127, - [1][0][2][0][11][11] = 127, - [1][0][2][0][2][12] = 127, - [1][0][2][0][1][12] = 127, - [1][0][2][0][3][12] = 127, - [1][0][2][0][5][12] = 127, - [1][0][2][0][6][12] = 127, - [1][0][2][0][9][12] = 127, - [1][0][2][0][8][12] = 127, - [1][0][2][0][11][12] = 127, - [1][0][2][0][2][13] = 127, - [1][0][2][0][1][13] = 127, - [1][0][2][0][3][13] = 127, - [1][0][2][0][5][13] = 127, - [1][0][2][0][6][13] = 127, - [1][0][2][0][9][13] = 127, - [1][0][2][0][8][13] = 127, - [1][0][2][0][11][13] = 127, - [1][1][2][0][2][0] = 127, - [1][1][2][0][1][0] = 127, - [1][1][2][0][3][0] = 127, - [1][1][2][0][5][0] = 127, - [1][1][2][0][6][0] = 127, - [1][1][2][0][9][0] = 127, - [1][1][2][0][8][0] = 127, - [1][1][2][0][11][0] = 127, - [1][1][2][0][2][1] = 127, - [1][1][2][0][1][1] = 127, - [1][1][2][0][3][1] = 127, - [1][1][2][0][5][1] = 127, - [1][1][2][0][6][1] = 127, - [1][1][2][0][9][1] = 127, - [1][1][2][0][8][1] = 127, - [1][1][2][0][11][1] = 127, - [1][1][2][0][2][2] = 52, - [1][1][2][0][1][2] = 46, - [1][1][2][0][3][2] = 64, - [1][1][2][0][5][2] = 52, - [1][1][2][0][6][2] = 46, - [1][1][2][0][9][2] = 46, - [1][1][2][0][8][2] = 48, - [1][1][2][0][11][2] = 46, - [1][1][2][0][2][3] = 52, - [1][1][2][0][1][3] = 46, - [1][1][2][0][3][3] = 64, - [1][1][2][0][5][3] = 52, - [1][1][2][0][6][3] = 46, - [1][1][2][0][9][3] = 46, - [1][1][2][0][8][3] = 48, - [1][1][2][0][11][3] = 46, - [1][1][2][0][2][4] = 56, - [1][1][2][0][1][4] = 46, - [1][1][2][0][3][4] = 64, - [1][1][2][0][5][4] = 56, - [1][1][2][0][6][4] = 46, - [1][1][2][0][9][4] = 46, - [1][1][2][0][8][4] = 48, - [1][1][2][0][11][4] = 46, - [1][1][2][0][2][5] = 60, - [1][1][2][0][1][5] = 46, - [1][1][2][0][3][5] = 64, - [1][1][2][0][5][5] = 60, - [1][1][2][0][6][5] = 46, - [1][1][2][0][9][5] = 46, - [1][1][2][0][8][5] = 48, - [1][1][2][0][11][5] = 46, - [1][1][2][0][2][6] = 54, - [1][1][2][0][1][6] = 46, - [1][1][2][0][3][6] = 64, - [1][1][2][0][5][6] = 52, - [1][1][2][0][6][6] = 46, - [1][1][2][0][9][6] = 46, - [1][1][2][0][8][6] = 48, - [1][1][2][0][11][6] = 46, - [1][1][2][0][2][7] = 50, - [1][1][2][0][1][7] = 46, - [1][1][2][0][3][7] = 64, - [1][1][2][0][5][7] = 48, - [1][1][2][0][6][7] = 46, - [1][1][2][0][9][7] = 46, - [1][1][2][0][8][7] = 48, - [1][1][2][0][11][7] = 46, - [1][1][2][0][2][8] = 50, - [1][1][2][0][1][8] = 46, - [1][1][2][0][3][8] = 64, - [1][1][2][0][5][8] = 48, - [1][1][2][0][6][8] = 46, - [1][1][2][0][9][8] = 46, - [1][1][2][0][8][8] = 48, - [1][1][2][0][11][8] = 46, - [1][1][2][0][2][9] = 38, - [1][1][2][0][1][9] = 46, - [1][1][2][0][3][9] = 64, - [1][1][2][0][5][9] = 38, - [1][1][2][0][6][9] = 46, - [1][1][2][0][9][9] = 46, - [1][1][2][0][8][9] = 48, - [1][1][2][0][11][9] = 46, - [1][1][2][0][2][10] = 36, - [1][1][2][0][1][10] = 46, - 
[1][1][2][0][3][10] = 64, - [1][1][2][0][5][10] = 36, - [1][1][2][0][6][10] = 46, - [1][1][2][0][9][10] = 46, - [1][1][2][0][8][10] = 48, - [1][1][2][0][11][10] = 46, - [1][1][2][0][2][11] = 127, - [1][1][2][0][1][11] = 127, - [1][1][2][0][3][11] = 127, - [1][1][2][0][5][11] = 127, - [1][1][2][0][6][11] = 127, - [1][1][2][0][9][11] = 127, - [1][1][2][0][8][11] = 127, - [1][1][2][0][11][11] = 127, - [1][1][2][0][2][12] = 127, - [1][1][2][0][1][12] = 127, - [1][1][2][0][3][12] = 127, - [1][1][2][0][5][12] = 127, - [1][1][2][0][6][12] = 127, - [1][1][2][0][9][12] = 127, - [1][1][2][0][8][12] = 127, - [1][1][2][0][11][12] = 127, - [1][1][2][0][2][13] = 127, - [1][1][2][0][1][13] = 127, - [1][1][2][0][3][13] = 127, - [1][1][2][0][5][13] = 127, - [1][1][2][0][6][13] = 127, - [1][1][2][0][9][13] = 127, - [1][1][2][0][8][13] = 127, - [1][1][2][0][11][13] = 127, - [1][1][2][1][2][0] = 127, - [1][1][2][1][1][0] = 127, - [1][1][2][1][3][0] = 127, - [1][1][2][1][5][0] = 127, - [1][1][2][1][6][0] = 127, - [1][1][2][1][9][0] = 127, - [1][1][2][1][8][0] = 127, - [1][1][2][1][11][0] = 127, - [1][1][2][1][2][1] = 127, - [1][1][2][1][1][1] = 127, - [1][1][2][1][3][1] = 127, - [1][1][2][1][5][1] = 127, - [1][1][2][1][6][1] = 127, - [1][1][2][1][9][1] = 127, - [1][1][2][1][8][1] = 127, - [1][1][2][1][11][1] = 127, - [1][1][2][1][2][2] = 52, - [1][1][2][1][1][2] = 34, - [1][1][2][1][3][2] = 64, - [1][1][2][1][5][2] = 52, - [1][1][2][1][6][2] = 34, - [1][1][2][1][9][2] = 34, - [1][1][2][1][8][2] = 36, - [1][1][2][1][11][2] = 34, - [1][1][2][1][2][3] = 52, - [1][1][2][1][1][3] = 34, - [1][1][2][1][3][3] = 64, - [1][1][2][1][5][3] = 52, - [1][1][2][1][6][3] = 34, - [1][1][2][1][9][3] = 34, - [1][1][2][1][8][3] = 36, - [1][1][2][1][11][3] = 34, - [1][1][2][1][2][4] = 56, - [1][1][2][1][1][4] = 34, - [1][1][2][1][3][4] = 64, - [1][1][2][1][5][4] = 56, - [1][1][2][1][6][4] = 34, - [1][1][2][1][9][4] = 34, - [1][1][2][1][8][4] = 36, - [1][1][2][1][11][4] = 34, - [1][1][2][1][2][5] = 60, - [1][1][2][1][1][5] = 34, - [1][1][2][1][3][5] = 64, - [1][1][2][1][5][5] = 60, - [1][1][2][1][6][5] = 34, - [1][1][2][1][9][5] = 34, - [1][1][2][1][8][5] = 36, - [1][1][2][1][11][5] = 34, - [1][1][2][1][2][6] = 54, - [1][1][2][1][1][6] = 34, - [1][1][2][1][3][6] = 64, - [1][1][2][1][5][6] = 52, - [1][1][2][1][6][6] = 34, - [1][1][2][1][9][6] = 34, - [1][1][2][1][8][6] = 36, - [1][1][2][1][11][6] = 34, - [1][1][2][1][2][7] = 50, - [1][1][2][1][1][7] = 34, - [1][1][2][1][3][7] = 64, - [1][1][2][1][5][7] = 48, - [1][1][2][1][6][7] = 34, - [1][1][2][1][9][7] = 34, - [1][1][2][1][8][7] = 36, - [1][1][2][1][11][7] = 34, - [1][1][2][1][2][8] = 50, - [1][1][2][1][1][8] = 34, - [1][1][2][1][3][8] = 64, - [1][1][2][1][5][8] = 48, - [1][1][2][1][6][8] = 34, - [1][1][2][1][9][8] = 34, - [1][1][2][1][8][8] = 36, - [1][1][2][1][11][8] = 34, - [1][1][2][1][2][9] = 38, - [1][1][2][1][1][9] = 34, - [1][1][2][1][3][9] = 64, - [1][1][2][1][5][9] = 38, - [1][1][2][1][6][9] = 34, - [1][1][2][1][9][9] = 34, - [1][1][2][1][8][9] = 36, - [1][1][2][1][11][9] = 34, - [1][1][2][1][2][10] = 36, - [1][1][2][1][1][10] = 34, - [1][1][2][1][3][10] = 64, - [1][1][2][1][5][10] = 36, - [1][1][2][1][6][10] = 34, - [1][1][2][1][9][10] = 34, - [1][1][2][1][8][10] = 36, - [1][1][2][1][11][10] = 34, - [1][1][2][1][2][11] = 127, - [1][1][2][1][1][11] = 127, - [1][1][2][1][3][11] = 127, - [1][1][2][1][5][11] = 127, - [1][1][2][1][6][11] = 127, - [1][1][2][1][9][11] = 127, - [1][1][2][1][8][11] = 127, - [1][1][2][1][11][11] = 127, - [1][1][2][1][2][12] = 127, - 
[1][1][2][1][1][12] = 127, - [1][1][2][1][3][12] = 127, - [1][1][2][1][5][12] = 127, - [1][1][2][1][6][12] = 127, - [1][1][2][1][9][12] = 127, - [1][1][2][1][8][12] = 127, - [1][1][2][1][11][12] = 127, - [1][1][2][1][2][13] = 127, - [1][1][2][1][1][13] = 127, - [1][1][2][1][3][13] = 127, - [1][1][2][1][5][13] = 127, - [1][1][2][1][6][13] = 127, - [1][1][2][1][9][13] = 127, - [1][1][2][1][8][13] = 127, - [1][1][2][1][11][13] = 127, + [0][0][0][0][RTW89_WW][0] = 56, + [0][0][0][0][RTW89_WW][1] = 56, + [0][0][0][0][RTW89_WW][2] = 56, + [0][0][0][0][RTW89_WW][3] = 56, + [0][0][0][0][RTW89_WW][4] = 56, + [0][0][0][0][RTW89_WW][5] = 56, + [0][0][0][0][RTW89_WW][6] = 56, + [0][0][0][0][RTW89_WW][7] = 56, + [0][0][0][0][RTW89_WW][8] = 56, + [0][0][0][0][RTW89_WW][9] = 56, + [0][0][0][0][RTW89_WW][10] = 56, + [0][0][0][0][RTW89_WW][11] = 56, + [0][0][0][0][RTW89_WW][12] = 48, + [0][0][0][0][RTW89_WW][13] = 76, + [0][1][0][0][RTW89_WW][0] = 44, + [0][1][0][0][RTW89_WW][1] = 44, + [0][1][0][0][RTW89_WW][2] = 44, + [0][1][0][0][RTW89_WW][3] = 44, + [0][1][0][0][RTW89_WW][4] = 44, + [0][1][0][0][RTW89_WW][5] = 44, + [0][1][0][0][RTW89_WW][6] = 44, + [0][1][0][0][RTW89_WW][7] = 44, + [0][1][0][0][RTW89_WW][8] = 44, + [0][1][0][0][RTW89_WW][9] = 44, + [0][1][0][0][RTW89_WW][10] = 44, + [0][1][0][0][RTW89_WW][11] = 44, + [0][1][0][0][RTW89_WW][12] = 38, + [0][1][0][0][RTW89_WW][13] = 64, + [1][0][0][0][RTW89_WW][0] = 0, + [1][0][0][0][RTW89_WW][1] = 0, + [1][0][0][0][RTW89_WW][2] = 58, + [1][0][0][0][RTW89_WW][3] = 58, + [1][0][0][0][RTW89_WW][4] = 58, + [1][0][0][0][RTW89_WW][5] = 58, + [1][0][0][0][RTW89_WW][6] = 46, + [1][0][0][0][RTW89_WW][7] = 46, + [1][0][0][0][RTW89_WW][8] = 46, + [1][0][0][0][RTW89_WW][9] = 32, + [1][0][0][0][RTW89_WW][10] = 32, + [1][0][0][0][RTW89_WW][11] = 0, + [1][0][0][0][RTW89_WW][12] = 0, + [1][0][0][0][RTW89_WW][13] = 0, + [1][1][0][0][RTW89_WW][0] = 0, + [1][1][0][0][RTW89_WW][1] = 0, + [1][1][0][0][RTW89_WW][2] = 46, + [1][1][0][0][RTW89_WW][3] = 46, + [1][1][0][0][RTW89_WW][4] = 46, + [1][1][0][0][RTW89_WW][5] = 46, + [1][1][0][0][RTW89_WW][6] = 46, + [1][1][0][0][RTW89_WW][7] = 46, + [1][1][0][0][RTW89_WW][8] = 46, + [1][1][0][0][RTW89_WW][9] = 24, + [1][1][0][0][RTW89_WW][10] = 24, + [1][1][0][0][RTW89_WW][11] = 0, + [1][1][0][0][RTW89_WW][12] = 0, + [1][1][0][0][RTW89_WW][13] = 0, + [0][0][1][0][RTW89_WW][0] = 58, + [0][0][1][0][RTW89_WW][1] = 58, + [0][0][1][0][RTW89_WW][2] = 58, + [0][0][1][0][RTW89_WW][3] = 58, + [0][0][1][0][RTW89_WW][4] = 58, + [0][0][1][0][RTW89_WW][5] = 58, + [0][0][1][0][RTW89_WW][6] = 58, + [0][0][1][0][RTW89_WW][7] = 58, + [0][0][1][0][RTW89_WW][8] = 58, + [0][0][1][0][RTW89_WW][9] = 58, + [0][0][1][0][RTW89_WW][10] = 58, + [0][0][1][0][RTW89_WW][11] = 56, + [0][0][1][0][RTW89_WW][12] = 52, + [0][0][1][0][RTW89_WW][13] = 0, + [0][1][1][0][RTW89_WW][0] = 46, + [0][1][1][0][RTW89_WW][1] = 46, + [0][1][1][0][RTW89_WW][2] = 46, + [0][1][1][0][RTW89_WW][3] = 46, + [0][1][1][0][RTW89_WW][4] = 46, + [0][1][1][0][RTW89_WW][5] = 46, + [0][1][1][0][RTW89_WW][6] = 46, + [0][1][1][0][RTW89_WW][7] = 46, + [0][1][1][0][RTW89_WW][8] = 46, + [0][1][1][0][RTW89_WW][9] = 46, + [0][1][1][0][RTW89_WW][10] = 46, + [0][1][1][0][RTW89_WW][11] = 42, + [0][1][1][0][RTW89_WW][12] = 40, + [0][1][1][0][RTW89_WW][13] = 0, + [0][0][2][0][RTW89_WW][0] = 58, + [0][0][2][0][RTW89_WW][1] = 58, + [0][0][2][0][RTW89_WW][2] = 58, + [0][0][2][0][RTW89_WW][3] = 58, + [0][0][2][0][RTW89_WW][4] = 58, + [0][0][2][0][RTW89_WW][5] = 58, + [0][0][2][0][RTW89_WW][6] = 58, + 
[0][0][2][0][RTW89_WW][7] = 58, + [0][0][2][0][RTW89_WW][8] = 58, + [0][0][2][0][RTW89_WW][9] = 58, + [0][0][2][0][RTW89_WW][10] = 58, + [0][0][2][0][RTW89_WW][11] = 54, + [0][0][2][0][RTW89_WW][12] = 50, + [0][0][2][0][RTW89_WW][13] = 0, + [0][1][2][0][RTW89_WW][0] = 46, + [0][1][2][0][RTW89_WW][1] = 46, + [0][1][2][0][RTW89_WW][2] = 46, + [0][1][2][0][RTW89_WW][3] = 46, + [0][1][2][0][RTW89_WW][4] = 46, + [0][1][2][0][RTW89_WW][5] = 46, + [0][1][2][0][RTW89_WW][6] = 46, + [0][1][2][0][RTW89_WW][7] = 46, + [0][1][2][0][RTW89_WW][8] = 46, + [0][1][2][0][RTW89_WW][9] = 46, + [0][1][2][0][RTW89_WW][10] = 46, + [0][1][2][0][RTW89_WW][11] = 42, + [0][1][2][0][RTW89_WW][12] = 40, + [0][1][2][0][RTW89_WW][13] = 0, + [0][1][2][1][RTW89_WW][0] = 34, + [0][1][2][1][RTW89_WW][1] = 34, + [0][1][2][1][RTW89_WW][2] = 34, + [0][1][2][1][RTW89_WW][3] = 34, + [0][1][2][1][RTW89_WW][4] = 34, + [0][1][2][1][RTW89_WW][5] = 34, + [0][1][2][1][RTW89_WW][6] = 34, + [0][1][2][1][RTW89_WW][7] = 34, + [0][1][2][1][RTW89_WW][8] = 34, + [0][1][2][1][RTW89_WW][9] = 34, + [0][1][2][1][RTW89_WW][10] = 34, + [0][1][2][1][RTW89_WW][11] = 34, + [0][1][2][1][RTW89_WW][12] = 34, + [0][1][2][1][RTW89_WW][13] = 0, + [1][0][2][0][RTW89_WW][0] = 0, + [1][0][2][0][RTW89_WW][1] = 0, + [1][0][2][0][RTW89_WW][2] = 56, + [1][0][2][0][RTW89_WW][3] = 56, + [1][0][2][0][RTW89_WW][4] = 58, + [1][0][2][0][RTW89_WW][5] = 58, + [1][0][2][0][RTW89_WW][6] = 54, + [1][0][2][0][RTW89_WW][7] = 50, + [1][0][2][0][RTW89_WW][8] = 50, + [1][0][2][0][RTW89_WW][9] = 42, + [1][0][2][0][RTW89_WW][10] = 40, + [1][0][2][0][RTW89_WW][11] = 0, + [1][0][2][0][RTW89_WW][12] = 0, + [1][0][2][0][RTW89_WW][13] = 0, + [1][1][2][0][RTW89_WW][0] = 0, + [1][1][2][0][RTW89_WW][1] = 0, + [1][1][2][0][RTW89_WW][2] = 46, + [1][1][2][0][RTW89_WW][3] = 46, + [1][1][2][0][RTW89_WW][4] = 46, + [1][1][2][0][RTW89_WW][5] = 46, + [1][1][2][0][RTW89_WW][6] = 46, + [1][1][2][0][RTW89_WW][7] = 46, + [1][1][2][0][RTW89_WW][8] = 46, + [1][1][2][0][RTW89_WW][9] = 38, + [1][1][2][0][RTW89_WW][10] = 36, + [1][1][2][0][RTW89_WW][11] = 0, + [1][1][2][0][RTW89_WW][12] = 0, + [1][1][2][0][RTW89_WW][13] = 0, + [1][1][2][1][RTW89_WW][0] = 0, + [1][1][2][1][RTW89_WW][1] = 0, + [1][1][2][1][RTW89_WW][2] = 34, + [1][1][2][1][RTW89_WW][3] = 34, + [1][1][2][1][RTW89_WW][4] = 34, + [1][1][2][1][RTW89_WW][5] = 34, + [1][1][2][1][RTW89_WW][6] = 34, + [1][1][2][1][RTW89_WW][7] = 34, + [1][1][2][1][RTW89_WW][8] = 34, + [1][1][2][1][RTW89_WW][9] = 34, + [1][1][2][1][RTW89_WW][10] = 34, + [1][1][2][1][RTW89_WW][11] = 0, + [1][1][2][1][RTW89_WW][12] = 0, + [1][1][2][1][RTW89_WW][13] = 0, + [0][0][0][0][RTW89_FCC][0] = 76, + [0][0][0][0][RTW89_ETSI][0] = 56, + [0][0][0][0][RTW89_MKK][0] = 68, + [0][0][0][0][RTW89_IC][0] = 76, + [0][0][0][0][RTW89_KCC][0] = 76, + [0][0][0][0][RTW89_ACMA][0] = 56, + [0][0][0][0][RTW89_CHILE][0] = 60, + [0][0][0][0][RTW89_UKRAINE][0] = 56, + [0][0][0][0][RTW89_MEXICO][0] = 76, + [0][0][0][0][RTW89_CN][0] = 56, + [0][0][0][0][RTW89_QATAR][0] = 56, + [0][0][0][0][RTW89_FCC][1] = 76, + [0][0][0][0][RTW89_ETSI][1] = 56, + [0][0][0][0][RTW89_MKK][1] = 68, + [0][0][0][0][RTW89_IC][1] = 76, + [0][0][0][0][RTW89_KCC][1] = 76, + [0][0][0][0][RTW89_ACMA][1] = 56, + [0][0][0][0][RTW89_CHILE][1] = 60, + [0][0][0][0][RTW89_UKRAINE][1] = 56, + [0][0][0][0][RTW89_MEXICO][1] = 76, + [0][0][0][0][RTW89_CN][1] = 56, + [0][0][0][0][RTW89_QATAR][1] = 56, + [0][0][0][0][RTW89_FCC][2] = 76, + [0][0][0][0][RTW89_ETSI][2] = 56, + [0][0][0][0][RTW89_MKK][2] = 68, + [0][0][0][0][RTW89_IC][2] = 76, 
+ [0][0][0][0][RTW89_KCC][2] = 76, + [0][0][0][0][RTW89_ACMA][2] = 56, + [0][0][0][0][RTW89_CHILE][2] = 60, + [0][0][0][0][RTW89_UKRAINE][2] = 56, + [0][0][0][0][RTW89_MEXICO][2] = 76, + [0][0][0][0][RTW89_CN][2] = 56, + [0][0][0][0][RTW89_QATAR][2] = 56, + [0][0][0][0][RTW89_FCC][3] = 76, + [0][0][0][0][RTW89_ETSI][3] = 56, + [0][0][0][0][RTW89_MKK][3] = 68, + [0][0][0][0][RTW89_IC][3] = 76, + [0][0][0][0][RTW89_KCC][3] = 76, + [0][0][0][0][RTW89_ACMA][3] = 56, + [0][0][0][0][RTW89_CHILE][3] = 60, + [0][0][0][0][RTW89_UKRAINE][3] = 56, + [0][0][0][0][RTW89_MEXICO][3] = 76, + [0][0][0][0][RTW89_CN][3] = 56, + [0][0][0][0][RTW89_QATAR][3] = 56, + [0][0][0][0][RTW89_FCC][4] = 76, + [0][0][0][0][RTW89_ETSI][4] = 56, + [0][0][0][0][RTW89_MKK][4] = 68, + [0][0][0][0][RTW89_IC][4] = 76, + [0][0][0][0][RTW89_KCC][4] = 76, + [0][0][0][0][RTW89_ACMA][4] = 56, + [0][0][0][0][RTW89_CHILE][4] = 60, + [0][0][0][0][RTW89_UKRAINE][4] = 56, + [0][0][0][0][RTW89_MEXICO][4] = 76, + [0][0][0][0][RTW89_CN][4] = 56, + [0][0][0][0][RTW89_QATAR][4] = 56, + [0][0][0][0][RTW89_FCC][5] = 76, + [0][0][0][0][RTW89_ETSI][5] = 56, + [0][0][0][0][RTW89_MKK][5] = 68, + [0][0][0][0][RTW89_IC][5] = 76, + [0][0][0][0][RTW89_KCC][5] = 76, + [0][0][0][0][RTW89_ACMA][5] = 56, + [0][0][0][0][RTW89_CHILE][5] = 60, + [0][0][0][0][RTW89_UKRAINE][5] = 56, + [0][0][0][0][RTW89_MEXICO][5] = 76, + [0][0][0][0][RTW89_CN][5] = 56, + [0][0][0][0][RTW89_QATAR][5] = 56, + [0][0][0][0][RTW89_FCC][6] = 76, + [0][0][0][0][RTW89_ETSI][6] = 56, + [0][0][0][0][RTW89_MKK][6] = 68, + [0][0][0][0][RTW89_IC][6] = 76, + [0][0][0][0][RTW89_KCC][6] = 76, + [0][0][0][0][RTW89_ACMA][6] = 56, + [0][0][0][0][RTW89_CHILE][6] = 60, + [0][0][0][0][RTW89_UKRAINE][6] = 56, + [0][0][0][0][RTW89_MEXICO][6] = 76, + [0][0][0][0][RTW89_CN][6] = 56, + [0][0][0][0][RTW89_QATAR][6] = 56, + [0][0][0][0][RTW89_FCC][7] = 76, + [0][0][0][0][RTW89_ETSI][7] = 56, + [0][0][0][0][RTW89_MKK][7] = 68, + [0][0][0][0][RTW89_IC][7] = 76, + [0][0][0][0][RTW89_KCC][7] = 76, + [0][0][0][0][RTW89_ACMA][7] = 56, + [0][0][0][0][RTW89_CHILE][7] = 60, + [0][0][0][0][RTW89_UKRAINE][7] = 56, + [0][0][0][0][RTW89_MEXICO][7] = 76, + [0][0][0][0][RTW89_CN][7] = 56, + [0][0][0][0][RTW89_QATAR][7] = 56, + [0][0][0][0][RTW89_FCC][8] = 76, + [0][0][0][0][RTW89_ETSI][8] = 56, + [0][0][0][0][RTW89_MKK][8] = 68, + [0][0][0][0][RTW89_IC][8] = 76, + [0][0][0][0][RTW89_KCC][8] = 76, + [0][0][0][0][RTW89_ACMA][8] = 56, + [0][0][0][0][RTW89_CHILE][8] = 60, + [0][0][0][0][RTW89_UKRAINE][8] = 56, + [0][0][0][0][RTW89_MEXICO][8] = 76, + [0][0][0][0][RTW89_CN][8] = 56, + [0][0][0][0][RTW89_QATAR][8] = 56, + [0][0][0][0][RTW89_FCC][9] = 76, + [0][0][0][0][RTW89_ETSI][9] = 56, + [0][0][0][0][RTW89_MKK][9] = 68, + [0][0][0][0][RTW89_IC][9] = 76, + [0][0][0][0][RTW89_KCC][9] = 76, + [0][0][0][0][RTW89_ACMA][9] = 56, + [0][0][0][0][RTW89_CHILE][9] = 60, + [0][0][0][0][RTW89_UKRAINE][9] = 56, + [0][0][0][0][RTW89_MEXICO][9] = 76, + [0][0][0][0][RTW89_CN][9] = 56, + [0][0][0][0][RTW89_QATAR][9] = 56, + [0][0][0][0][RTW89_FCC][10] = 76, + [0][0][0][0][RTW89_ETSI][10] = 56, + [0][0][0][0][RTW89_MKK][10] = 68, + [0][0][0][0][RTW89_IC][10] = 76, + [0][0][0][0][RTW89_KCC][10] = 76, + [0][0][0][0][RTW89_ACMA][10] = 56, + [0][0][0][0][RTW89_CHILE][10] = 60, + [0][0][0][0][RTW89_UKRAINE][10] = 56, + [0][0][0][0][RTW89_MEXICO][10] = 76, + [0][0][0][0][RTW89_CN][10] = 56, + [0][0][0][0][RTW89_QATAR][10] = 56, + [0][0][0][0][RTW89_FCC][11] = 68, + [0][0][0][0][RTW89_ETSI][11] = 56, + [0][0][0][0][RTW89_MKK][11] = 68, + 
[0][0][0][0][RTW89_IC][11] = 68, + [0][0][0][0][RTW89_KCC][11] = 76, + [0][0][0][0][RTW89_ACMA][11] = 56, + [0][0][0][0][RTW89_CHILE][11] = 60, + [0][0][0][0][RTW89_UKRAINE][11] = 56, + [0][0][0][0][RTW89_MEXICO][11] = 68, + [0][0][0][0][RTW89_CN][11] = 56, + [0][0][0][0][RTW89_QATAR][11] = 56, + [0][0][0][0][RTW89_FCC][12] = 48, + [0][0][0][0][RTW89_ETSI][12] = 56, + [0][0][0][0][RTW89_MKK][12] = 68, + [0][0][0][0][RTW89_IC][12] = 48, + [0][0][0][0][RTW89_KCC][12] = 76, + [0][0][0][0][RTW89_ACMA][12] = 56, + [0][0][0][0][RTW89_CHILE][12] = 48, + [0][0][0][0][RTW89_UKRAINE][12] = 56, + [0][0][0][0][RTW89_MEXICO][12] = 48, + [0][0][0][0][RTW89_CN][12] = 56, + [0][0][0][0][RTW89_QATAR][12] = 56, + [0][0][0][0][RTW89_FCC][13] = 127, + [0][0][0][0][RTW89_ETSI][13] = 127, + [0][0][0][0][RTW89_MKK][13] = 76, + [0][0][0][0][RTW89_IC][13] = 127, + [0][0][0][0][RTW89_KCC][13] = 127, + [0][0][0][0][RTW89_ACMA][13] = 127, + [0][0][0][0][RTW89_CHILE][13] = 127, + [0][0][0][0][RTW89_UKRAINE][13] = 127, + [0][0][0][0][RTW89_MEXICO][13] = 127, + [0][0][0][0][RTW89_CN][13] = 127, + [0][0][0][0][RTW89_QATAR][13] = 127, + [0][1][0][0][RTW89_FCC][0] = 74, + [0][1][0][0][RTW89_ETSI][0] = 44, + [0][1][0][0][RTW89_MKK][0] = 56, + [0][1][0][0][RTW89_IC][0] = 74, + [0][1][0][0][RTW89_KCC][0] = 68, + [0][1][0][0][RTW89_ACMA][0] = 44, + [0][1][0][0][RTW89_CHILE][0] = 48, + [0][1][0][0][RTW89_UKRAINE][0] = 44, + [0][1][0][0][RTW89_MEXICO][0] = 74, + [0][1][0][0][RTW89_CN][0] = 44, + [0][1][0][0][RTW89_QATAR][0] = 44, + [0][1][0][0][RTW89_FCC][1] = 76, + [0][1][0][0][RTW89_ETSI][1] = 44, + [0][1][0][0][RTW89_MKK][1] = 56, + [0][1][0][0][RTW89_IC][1] = 76, + [0][1][0][0][RTW89_KCC][1] = 68, + [0][1][0][0][RTW89_ACMA][1] = 44, + [0][1][0][0][RTW89_CHILE][1] = 48, + [0][1][0][0][RTW89_UKRAINE][1] = 44, + [0][1][0][0][RTW89_MEXICO][1] = 76, + [0][1][0][0][RTW89_CN][1] = 44, + [0][1][0][0][RTW89_QATAR][1] = 44, + [0][1][0][0][RTW89_FCC][2] = 76, + [0][1][0][0][RTW89_ETSI][2] = 44, + [0][1][0][0][RTW89_MKK][2] = 56, + [0][1][0][0][RTW89_IC][2] = 76, + [0][1][0][0][RTW89_KCC][2] = 68, + [0][1][0][0][RTW89_ACMA][2] = 44, + [0][1][0][0][RTW89_CHILE][2] = 48, + [0][1][0][0][RTW89_UKRAINE][2] = 44, + [0][1][0][0][RTW89_MEXICO][2] = 76, + [0][1][0][0][RTW89_CN][2] = 44, + [0][1][0][0][RTW89_QATAR][2] = 44, + [0][1][0][0][RTW89_FCC][3] = 76, + [0][1][0][0][RTW89_ETSI][3] = 44, + [0][1][0][0][RTW89_MKK][3] = 56, + [0][1][0][0][RTW89_IC][3] = 76, + [0][1][0][0][RTW89_KCC][3] = 68, + [0][1][0][0][RTW89_ACMA][3] = 44, + [0][1][0][0][RTW89_CHILE][3] = 48, + [0][1][0][0][RTW89_UKRAINE][3] = 44, + [0][1][0][0][RTW89_MEXICO][3] = 76, + [0][1][0][0][RTW89_CN][3] = 44, + [0][1][0][0][RTW89_QATAR][3] = 44, + [0][1][0][0][RTW89_FCC][4] = 76, + [0][1][0][0][RTW89_ETSI][4] = 44, + [0][1][0][0][RTW89_MKK][4] = 56, + [0][1][0][0][RTW89_IC][4] = 76, + [0][1][0][0][RTW89_KCC][4] = 68, + [0][1][0][0][RTW89_ACMA][4] = 44, + [0][1][0][0][RTW89_CHILE][4] = 48, + [0][1][0][0][RTW89_UKRAINE][4] = 44, + [0][1][0][0][RTW89_MEXICO][4] = 76, + [0][1][0][0][RTW89_CN][4] = 44, + [0][1][0][0][RTW89_QATAR][4] = 44, + [0][1][0][0][RTW89_FCC][5] = 76, + [0][1][0][0][RTW89_ETSI][5] = 44, + [0][1][0][0][RTW89_MKK][5] = 56, + [0][1][0][0][RTW89_IC][5] = 76, + [0][1][0][0][RTW89_KCC][5] = 68, + [0][1][0][0][RTW89_ACMA][5] = 44, + [0][1][0][0][RTW89_CHILE][5] = 48, + [0][1][0][0][RTW89_UKRAINE][5] = 44, + [0][1][0][0][RTW89_MEXICO][5] = 76, + [0][1][0][0][RTW89_CN][5] = 44, + [0][1][0][0][RTW89_QATAR][5] = 44, + [0][1][0][0][RTW89_FCC][6] = 76, + 
[0][1][0][0][RTW89_ETSI][6] = 44, + [0][1][0][0][RTW89_MKK][6] = 56, + [0][1][0][0][RTW89_IC][6] = 76, + [0][1][0][0][RTW89_KCC][6] = 68, + [0][1][0][0][RTW89_ACMA][6] = 44, + [0][1][0][0][RTW89_CHILE][6] = 48, + [0][1][0][0][RTW89_UKRAINE][6] = 44, + [0][1][0][0][RTW89_MEXICO][6] = 76, + [0][1][0][0][RTW89_CN][6] = 44, + [0][1][0][0][RTW89_QATAR][6] = 44, + [0][1][0][0][RTW89_FCC][7] = 76, + [0][1][0][0][RTW89_ETSI][7] = 44, + [0][1][0][0][RTW89_MKK][7] = 56, + [0][1][0][0][RTW89_IC][7] = 76, + [0][1][0][0][RTW89_KCC][7] = 68, + [0][1][0][0][RTW89_ACMA][7] = 44, + [0][1][0][0][RTW89_CHILE][7] = 48, + [0][1][0][0][RTW89_UKRAINE][7] = 44, + [0][1][0][0][RTW89_MEXICO][7] = 76, + [0][1][0][0][RTW89_CN][7] = 44, + [0][1][0][0][RTW89_QATAR][7] = 44, + [0][1][0][0][RTW89_FCC][8] = 76, + [0][1][0][0][RTW89_ETSI][8] = 44, + [0][1][0][0][RTW89_MKK][8] = 56, + [0][1][0][0][RTW89_IC][8] = 76, + [0][1][0][0][RTW89_KCC][8] = 68, + [0][1][0][0][RTW89_ACMA][8] = 44, + [0][1][0][0][RTW89_CHILE][8] = 48, + [0][1][0][0][RTW89_UKRAINE][8] = 44, + [0][1][0][0][RTW89_MEXICO][8] = 76, + [0][1][0][0][RTW89_CN][8] = 44, + [0][1][0][0][RTW89_QATAR][8] = 44, + [0][1][0][0][RTW89_FCC][9] = 76, + [0][1][0][0][RTW89_ETSI][9] = 44, + [0][1][0][0][RTW89_MKK][9] = 56, + [0][1][0][0][RTW89_IC][9] = 76, + [0][1][0][0][RTW89_KCC][9] = 68, + [0][1][0][0][RTW89_ACMA][9] = 44, + [0][1][0][0][RTW89_CHILE][9] = 48, + [0][1][0][0][RTW89_UKRAINE][9] = 44, + [0][1][0][0][RTW89_MEXICO][9] = 76, + [0][1][0][0][RTW89_CN][9] = 44, + [0][1][0][0][RTW89_QATAR][9] = 44, + [0][1][0][0][RTW89_FCC][10] = 62, + [0][1][0][0][RTW89_ETSI][10] = 44, + [0][1][0][0][RTW89_MKK][10] = 56, + [0][1][0][0][RTW89_IC][10] = 62, + [0][1][0][0][RTW89_KCC][10] = 68, + [0][1][0][0][RTW89_ACMA][10] = 44, + [0][1][0][0][RTW89_CHILE][10] = 48, + [0][1][0][0][RTW89_UKRAINE][10] = 44, + [0][1][0][0][RTW89_MEXICO][10] = 62, + [0][1][0][0][RTW89_CN][10] = 44, + [0][1][0][0][RTW89_QATAR][10] = 44, + [0][1][0][0][RTW89_FCC][11] = 52, + [0][1][0][0][RTW89_ETSI][11] = 44, + [0][1][0][0][RTW89_MKK][11] = 56, + [0][1][0][0][RTW89_IC][11] = 52, + [0][1][0][0][RTW89_KCC][11] = 68, + [0][1][0][0][RTW89_ACMA][11] = 44, + [0][1][0][0][RTW89_CHILE][11] = 48, + [0][1][0][0][RTW89_UKRAINE][11] = 44, + [0][1][0][0][RTW89_MEXICO][11] = 52, + [0][1][0][0][RTW89_CN][11] = 44, + [0][1][0][0][RTW89_QATAR][11] = 44, + [0][1][0][0][RTW89_FCC][12] = 38, + [0][1][0][0][RTW89_ETSI][12] = 44, + [0][1][0][0][RTW89_MKK][12] = 56, + [0][1][0][0][RTW89_IC][12] = 38, + [0][1][0][0][RTW89_KCC][12] = 68, + [0][1][0][0][RTW89_ACMA][12] = 44, + [0][1][0][0][RTW89_CHILE][12] = 38, + [0][1][0][0][RTW89_UKRAINE][12] = 44, + [0][1][0][0][RTW89_MEXICO][12] = 38, + [0][1][0][0][RTW89_CN][12] = 44, + [0][1][0][0][RTW89_QATAR][12] = 44, + [0][1][0][0][RTW89_FCC][13] = 127, + [0][1][0][0][RTW89_ETSI][13] = 127, + [0][1][0][0][RTW89_MKK][13] = 64, + [0][1][0][0][RTW89_IC][13] = 127, + [0][1][0][0][RTW89_KCC][13] = 127, + [0][1][0][0][RTW89_ACMA][13] = 127, + [0][1][0][0][RTW89_CHILE][13] = 127, + [0][1][0][0][RTW89_UKRAINE][13] = 127, + [0][1][0][0][RTW89_MEXICO][13] = 127, + [0][1][0][0][RTW89_CN][13] = 127, + [0][1][0][0][RTW89_QATAR][13] = 127, + [1][0][0][0][RTW89_FCC][0] = 127, + [1][0][0][0][RTW89_ETSI][0] = 127, + [1][0][0][0][RTW89_MKK][0] = 127, + [1][0][0][0][RTW89_IC][0] = 127, + [1][0][0][0][RTW89_KCC][0] = 127, + [1][0][0][0][RTW89_ACMA][0] = 127, + [1][0][0][0][RTW89_CHILE][0] = 127, + [1][0][0][0][RTW89_UKRAINE][0] = 127, + [1][0][0][0][RTW89_MEXICO][0] = 127, + [1][0][0][0][RTW89_CN][0] = 127, 
+ [1][0][0][0][RTW89_QATAR][0] = 127, + [1][0][0][0][RTW89_FCC][1] = 127, + [1][0][0][0][RTW89_ETSI][1] = 127, + [1][0][0][0][RTW89_MKK][1] = 127, + [1][0][0][0][RTW89_IC][1] = 127, + [1][0][0][0][RTW89_KCC][1] = 127, + [1][0][0][0][RTW89_ACMA][1] = 127, + [1][0][0][0][RTW89_CHILE][1] = 127, + [1][0][0][0][RTW89_UKRAINE][1] = 127, + [1][0][0][0][RTW89_MEXICO][1] = 127, + [1][0][0][0][RTW89_CN][1] = 127, + [1][0][0][0][RTW89_QATAR][1] = 127, + [1][0][0][0][RTW89_FCC][2] = 60, + [1][0][0][0][RTW89_ETSI][2] = 58, + [1][0][0][0][RTW89_MKK][2] = 68, + [1][0][0][0][RTW89_IC][2] = 60, + [1][0][0][0][RTW89_KCC][2] = 70, + [1][0][0][0][RTW89_ACMA][2] = 58, + [1][0][0][0][RTW89_CHILE][2] = 60, + [1][0][0][0][RTW89_UKRAINE][2] = 58, + [1][0][0][0][RTW89_MEXICO][2] = 60, + [1][0][0][0][RTW89_CN][2] = 58, + [1][0][0][0][RTW89_QATAR][2] = 58, + [1][0][0][0][RTW89_FCC][3] = 60, + [1][0][0][0][RTW89_ETSI][3] = 58, + [1][0][0][0][RTW89_MKK][3] = 68, + [1][0][0][0][RTW89_IC][3] = 60, + [1][0][0][0][RTW89_KCC][3] = 70, + [1][0][0][0][RTW89_ACMA][3] = 58, + [1][0][0][0][RTW89_CHILE][3] = 60, + [1][0][0][0][RTW89_UKRAINE][3] = 58, + [1][0][0][0][RTW89_MEXICO][3] = 60, + [1][0][0][0][RTW89_CN][3] = 58, + [1][0][0][0][RTW89_QATAR][3] = 58, + [1][0][0][0][RTW89_FCC][4] = 60, + [1][0][0][0][RTW89_ETSI][4] = 58, + [1][0][0][0][RTW89_MKK][4] = 68, + [1][0][0][0][RTW89_IC][4] = 60, + [1][0][0][0][RTW89_KCC][4] = 70, + [1][0][0][0][RTW89_ACMA][4] = 58, + [1][0][0][0][RTW89_CHILE][4] = 60, + [1][0][0][0][RTW89_UKRAINE][4] = 58, + [1][0][0][0][RTW89_MEXICO][4] = 60, + [1][0][0][0][RTW89_CN][4] = 58, + [1][0][0][0][RTW89_QATAR][4] = 58, + [1][0][0][0][RTW89_FCC][5] = 60, + [1][0][0][0][RTW89_ETSI][5] = 58, + [1][0][0][0][RTW89_MKK][5] = 68, + [1][0][0][0][RTW89_IC][5] = 60, + [1][0][0][0][RTW89_KCC][5] = 70, + [1][0][0][0][RTW89_ACMA][5] = 58, + [1][0][0][0][RTW89_CHILE][5] = 60, + [1][0][0][0][RTW89_UKRAINE][5] = 58, + [1][0][0][0][RTW89_MEXICO][5] = 60, + [1][0][0][0][RTW89_CN][5] = 58, + [1][0][0][0][RTW89_QATAR][5] = 58, + [1][0][0][0][RTW89_FCC][6] = 46, + [1][0][0][0][RTW89_ETSI][6] = 58, + [1][0][0][0][RTW89_MKK][6] = 68, + [1][0][0][0][RTW89_IC][6] = 46, + [1][0][0][0][RTW89_KCC][6] = 70, + [1][0][0][0][RTW89_ACMA][6] = 58, + [1][0][0][0][RTW89_CHILE][6] = 46, + [1][0][0][0][RTW89_UKRAINE][6] = 58, + [1][0][0][0][RTW89_MEXICO][6] = 46, + [1][0][0][0][RTW89_CN][6] = 58, + [1][0][0][0][RTW89_QATAR][6] = 58, + [1][0][0][0][RTW89_FCC][7] = 46, + [1][0][0][0][RTW89_ETSI][7] = 58, + [1][0][0][0][RTW89_MKK][7] = 68, + [1][0][0][0][RTW89_IC][7] = 46, + [1][0][0][0][RTW89_KCC][7] = 70, + [1][0][0][0][RTW89_ACMA][7] = 58, + [1][0][0][0][RTW89_CHILE][7] = 46, + [1][0][0][0][RTW89_UKRAINE][7] = 58, + [1][0][0][0][RTW89_MEXICO][7] = 46, + [1][0][0][0][RTW89_CN][7] = 58, + [1][0][0][0][RTW89_QATAR][7] = 58, + [1][0][0][0][RTW89_FCC][8] = 46, + [1][0][0][0][RTW89_ETSI][8] = 58, + [1][0][0][0][RTW89_MKK][8] = 68, + [1][0][0][0][RTW89_IC][8] = 46, + [1][0][0][0][RTW89_KCC][8] = 70, + [1][0][0][0][RTW89_ACMA][8] = 58, + [1][0][0][0][RTW89_CHILE][8] = 46, + [1][0][0][0][RTW89_UKRAINE][8] = 58, + [1][0][0][0][RTW89_MEXICO][8] = 46, + [1][0][0][0][RTW89_CN][8] = 58, + [1][0][0][0][RTW89_QATAR][8] = 58, + [1][0][0][0][RTW89_FCC][9] = 32, + [1][0][0][0][RTW89_ETSI][9] = 58, + [1][0][0][0][RTW89_MKK][9] = 68, + [1][0][0][0][RTW89_IC][9] = 32, + [1][0][0][0][RTW89_KCC][9] = 70, + [1][0][0][0][RTW89_ACMA][9] = 58, + [1][0][0][0][RTW89_CHILE][9] = 32, + [1][0][0][0][RTW89_UKRAINE][9] = 58, + [1][0][0][0][RTW89_MEXICO][9] = 32, + 
[1][0][0][0][RTW89_CN][9] = 58, + [1][0][0][0][RTW89_QATAR][9] = 58, + [1][0][0][0][RTW89_FCC][10] = 32, + [1][0][0][0][RTW89_ETSI][10] = 58, + [1][0][0][0][RTW89_MKK][10] = 68, + [1][0][0][0][RTW89_IC][10] = 32, + [1][0][0][0][RTW89_KCC][10] = 70, + [1][0][0][0][RTW89_ACMA][10] = 58, + [1][0][0][0][RTW89_CHILE][10] = 32, + [1][0][0][0][RTW89_UKRAINE][10] = 58, + [1][0][0][0][RTW89_MEXICO][10] = 32, + [1][0][0][0][RTW89_CN][10] = 58, + [1][0][0][0][RTW89_QATAR][10] = 58, + [1][0][0][0][RTW89_FCC][11] = 127, + [1][0][0][0][RTW89_ETSI][11] = 127, + [1][0][0][0][RTW89_MKK][11] = 127, + [1][0][0][0][RTW89_IC][11] = 127, + [1][0][0][0][RTW89_KCC][11] = 127, + [1][0][0][0][RTW89_ACMA][11] = 127, + [1][0][0][0][RTW89_CHILE][11] = 127, + [1][0][0][0][RTW89_UKRAINE][11] = 127, + [1][0][0][0][RTW89_MEXICO][11] = 127, + [1][0][0][0][RTW89_CN][11] = 127, + [1][0][0][0][RTW89_QATAR][11] = 127, + [1][0][0][0][RTW89_FCC][12] = 127, + [1][0][0][0][RTW89_ETSI][12] = 127, + [1][0][0][0][RTW89_MKK][12] = 127, + [1][0][0][0][RTW89_IC][12] = 127, + [1][0][0][0][RTW89_KCC][12] = 127, + [1][0][0][0][RTW89_ACMA][12] = 127, + [1][0][0][0][RTW89_CHILE][12] = 127, + [1][0][0][0][RTW89_UKRAINE][12] = 127, + [1][0][0][0][RTW89_MEXICO][12] = 127, + [1][0][0][0][RTW89_CN][12] = 127, + [1][0][0][0][RTW89_QATAR][12] = 127, + [1][0][0][0][RTW89_FCC][13] = 127, + [1][0][0][0][RTW89_ETSI][13] = 127, + [1][0][0][0][RTW89_MKK][13] = 127, + [1][0][0][0][RTW89_IC][13] = 127, + [1][0][0][0][RTW89_KCC][13] = 127, + [1][0][0][0][RTW89_ACMA][13] = 127, + [1][0][0][0][RTW89_CHILE][13] = 127, + [1][0][0][0][RTW89_UKRAINE][13] = 127, + [1][0][0][0][RTW89_MEXICO][13] = 127, + [1][0][0][0][RTW89_CN][13] = 127, + [1][0][0][0][RTW89_QATAR][13] = 127, + [1][1][0][0][RTW89_FCC][0] = 127, + [1][1][0][0][RTW89_ETSI][0] = 127, + [1][1][0][0][RTW89_MKK][0] = 127, + [1][1][0][0][RTW89_IC][0] = 127, + [1][1][0][0][RTW89_KCC][0] = 127, + [1][1][0][0][RTW89_ACMA][0] = 127, + [1][1][0][0][RTW89_CHILE][0] = 127, + [1][1][0][0][RTW89_UKRAINE][0] = 127, + [1][1][0][0][RTW89_MEXICO][0] = 127, + [1][1][0][0][RTW89_CN][0] = 127, + [1][1][0][0][RTW89_QATAR][0] = 127, + [1][1][0][0][RTW89_FCC][1] = 127, + [1][1][0][0][RTW89_ETSI][1] = 127, + [1][1][0][0][RTW89_MKK][1] = 127, + [1][1][0][0][RTW89_IC][1] = 127, + [1][1][0][0][RTW89_KCC][1] = 127, + [1][1][0][0][RTW89_ACMA][1] = 127, + [1][1][0][0][RTW89_CHILE][1] = 127, + [1][1][0][0][RTW89_UKRAINE][1] = 127, + [1][1][0][0][RTW89_MEXICO][1] = 127, + [1][1][0][0][RTW89_CN][1] = 127, + [1][1][0][0][RTW89_QATAR][1] = 127, + [1][1][0][0][RTW89_FCC][2] = 48, + [1][1][0][0][RTW89_ETSI][2] = 46, + [1][1][0][0][RTW89_MKK][2] = 56, + [1][1][0][0][RTW89_IC][2] = 48, + [1][1][0][0][RTW89_KCC][2] = 58, + [1][1][0][0][RTW89_ACMA][2] = 46, + [1][1][0][0][RTW89_CHILE][2] = 48, + [1][1][0][0][RTW89_UKRAINE][2] = 46, + [1][1][0][0][RTW89_MEXICO][2] = 48, + [1][1][0][0][RTW89_CN][2] = 46, + [1][1][0][0][RTW89_QATAR][2] = 46, + [1][1][0][0][RTW89_FCC][3] = 48, + [1][1][0][0][RTW89_ETSI][3] = 46, + [1][1][0][0][RTW89_MKK][3] = 56, + [1][1][0][0][RTW89_IC][3] = 48, + [1][1][0][0][RTW89_KCC][3] = 58, + [1][1][0][0][RTW89_ACMA][3] = 46, + [1][1][0][0][RTW89_CHILE][3] = 48, + [1][1][0][0][RTW89_UKRAINE][3] = 46, + [1][1][0][0][RTW89_MEXICO][3] = 48, + [1][1][0][0][RTW89_CN][3] = 46, + [1][1][0][0][RTW89_QATAR][3] = 46, + [1][1][0][0][RTW89_FCC][4] = 48, + [1][1][0][0][RTW89_ETSI][4] = 46, + [1][1][0][0][RTW89_MKK][4] = 56, + [1][1][0][0][RTW89_IC][4] = 48, + [1][1][0][0][RTW89_KCC][4] = 58, + [1][1][0][0][RTW89_ACMA][4] = 46, + 
[1][1][0][0][RTW89_CHILE][4] = 48, + [1][1][0][0][RTW89_UKRAINE][4] = 46, + [1][1][0][0][RTW89_MEXICO][4] = 48, + [1][1][0][0][RTW89_CN][4] = 46, + [1][1][0][0][RTW89_QATAR][4] = 46, + [1][1][0][0][RTW89_FCC][5] = 58, + [1][1][0][0][RTW89_ETSI][5] = 46, + [1][1][0][0][RTW89_MKK][5] = 56, + [1][1][0][0][RTW89_IC][5] = 58, + [1][1][0][0][RTW89_KCC][5] = 58, + [1][1][0][0][RTW89_ACMA][5] = 46, + [1][1][0][0][RTW89_CHILE][5] = 48, + [1][1][0][0][RTW89_UKRAINE][5] = 46, + [1][1][0][0][RTW89_MEXICO][5] = 58, + [1][1][0][0][RTW89_CN][5] = 46, + [1][1][0][0][RTW89_QATAR][5] = 46, + [1][1][0][0][RTW89_FCC][6] = 46, + [1][1][0][0][RTW89_ETSI][6] = 46, + [1][1][0][0][RTW89_MKK][6] = 56, + [1][1][0][0][RTW89_IC][6] = 46, + [1][1][0][0][RTW89_KCC][6] = 58, + [1][1][0][0][RTW89_ACMA][6] = 46, + [1][1][0][0][RTW89_CHILE][6] = 46, + [1][1][0][0][RTW89_UKRAINE][6] = 46, + [1][1][0][0][RTW89_MEXICO][6] = 46, + [1][1][0][0][RTW89_CN][6] = 46, + [1][1][0][0][RTW89_QATAR][6] = 46, + [1][1][0][0][RTW89_FCC][7] = 46, + [1][1][0][0][RTW89_ETSI][7] = 46, + [1][1][0][0][RTW89_MKK][7] = 56, + [1][1][0][0][RTW89_IC][7] = 46, + [1][1][0][0][RTW89_KCC][7] = 58, + [1][1][0][0][RTW89_ACMA][7] = 46, + [1][1][0][0][RTW89_CHILE][7] = 46, + [1][1][0][0][RTW89_UKRAINE][7] = 46, + [1][1][0][0][RTW89_MEXICO][7] = 46, + [1][1][0][0][RTW89_CN][7] = 46, + [1][1][0][0][RTW89_QATAR][7] = 46, + [1][1][0][0][RTW89_FCC][8] = 46, + [1][1][0][0][RTW89_ETSI][8] = 46, + [1][1][0][0][RTW89_MKK][8] = 56, + [1][1][0][0][RTW89_IC][8] = 46, + [1][1][0][0][RTW89_KCC][8] = 58, + [1][1][0][0][RTW89_ACMA][8] = 46, + [1][1][0][0][RTW89_CHILE][8] = 46, + [1][1][0][0][RTW89_UKRAINE][8] = 46, + [1][1][0][0][RTW89_MEXICO][8] = 46, + [1][1][0][0][RTW89_CN][8] = 46, + [1][1][0][0][RTW89_QATAR][8] = 46, + [1][1][0][0][RTW89_FCC][9] = 24, + [1][1][0][0][RTW89_ETSI][9] = 46, + [1][1][0][0][RTW89_MKK][9] = 56, + [1][1][0][0][RTW89_IC][9] = 24, + [1][1][0][0][RTW89_KCC][9] = 58, + [1][1][0][0][RTW89_ACMA][9] = 46, + [1][1][0][0][RTW89_CHILE][9] = 24, + [1][1][0][0][RTW89_UKRAINE][9] = 46, + [1][1][0][0][RTW89_MEXICO][9] = 24, + [1][1][0][0][RTW89_CN][9] = 46, + [1][1][0][0][RTW89_QATAR][9] = 46, + [1][1][0][0][RTW89_FCC][10] = 24, + [1][1][0][0][RTW89_ETSI][10] = 46, + [1][1][0][0][RTW89_MKK][10] = 56, + [1][1][0][0][RTW89_IC][10] = 24, + [1][1][0][0][RTW89_KCC][10] = 58, + [1][1][0][0][RTW89_ACMA][10] = 46, + [1][1][0][0][RTW89_CHILE][10] = 24, + [1][1][0][0][RTW89_UKRAINE][10] = 46, + [1][1][0][0][RTW89_MEXICO][10] = 24, + [1][1][0][0][RTW89_CN][10] = 46, + [1][1][0][0][RTW89_QATAR][10] = 46, + [1][1][0][0][RTW89_FCC][11] = 127, + [1][1][0][0][RTW89_ETSI][11] = 127, + [1][1][0][0][RTW89_MKK][11] = 127, + [1][1][0][0][RTW89_IC][11] = 127, + [1][1][0][0][RTW89_KCC][11] = 127, + [1][1][0][0][RTW89_ACMA][11] = 127, + [1][1][0][0][RTW89_CHILE][11] = 127, + [1][1][0][0][RTW89_UKRAINE][11] = 127, + [1][1][0][0][RTW89_MEXICO][11] = 127, + [1][1][0][0][RTW89_CN][11] = 127, + [1][1][0][0][RTW89_QATAR][11] = 127, + [1][1][0][0][RTW89_FCC][12] = 127, + [1][1][0][0][RTW89_ETSI][12] = 127, + [1][1][0][0][RTW89_MKK][12] = 127, + [1][1][0][0][RTW89_IC][12] = 127, + [1][1][0][0][RTW89_KCC][12] = 127, + [1][1][0][0][RTW89_ACMA][12] = 127, + [1][1][0][0][RTW89_CHILE][12] = 127, + [1][1][0][0][RTW89_UKRAINE][12] = 127, + [1][1][0][0][RTW89_MEXICO][12] = 127, + [1][1][0][0][RTW89_CN][12] = 127, + [1][1][0][0][RTW89_QATAR][12] = 127, + [1][1][0][0][RTW89_FCC][13] = 127, + [1][1][0][0][RTW89_ETSI][13] = 127, + [1][1][0][0][RTW89_MKK][13] = 127, + [1][1][0][0][RTW89_IC][13] = 127, 
+ [1][1][0][0][RTW89_KCC][13] = 127, + [1][1][0][0][RTW89_ACMA][13] = 127, + [1][1][0][0][RTW89_CHILE][13] = 127, + [1][1][0][0][RTW89_UKRAINE][13] = 127, + [1][1][0][0][RTW89_MEXICO][13] = 127, + [1][1][0][0][RTW89_CN][13] = 127, + [1][1][0][0][RTW89_QATAR][13] = 127, + [0][0][1][0][RTW89_FCC][0] = 66, + [0][0][1][0][RTW89_ETSI][0] = 58, + [0][0][1][0][RTW89_MKK][0] = 76, + [0][0][1][0][RTW89_IC][0] = 66, + [0][0][1][0][RTW89_KCC][0] = 76, + [0][0][1][0][RTW89_ACMA][0] = 58, + [0][0][1][0][RTW89_CHILE][0] = 60, + [0][0][1][0][RTW89_UKRAINE][0] = 58, + [0][0][1][0][RTW89_MEXICO][0] = 66, + [0][0][1][0][RTW89_CN][0] = 58, + [0][0][1][0][RTW89_QATAR][0] = 58, + [0][0][1][0][RTW89_FCC][1] = 66, + [0][0][1][0][RTW89_ETSI][1] = 58, + [0][0][1][0][RTW89_MKK][1] = 76, + [0][0][1][0][RTW89_IC][1] = 66, + [0][0][1][0][RTW89_KCC][1] = 76, + [0][0][1][0][RTW89_ACMA][1] = 58, + [0][0][1][0][RTW89_CHILE][1] = 60, + [0][0][1][0][RTW89_UKRAINE][1] = 58, + [0][0][1][0][RTW89_MEXICO][1] = 66, + [0][0][1][0][RTW89_CN][1] = 58, + [0][0][1][0][RTW89_QATAR][1] = 58, + [0][0][1][0][RTW89_FCC][2] = 70, + [0][0][1][0][RTW89_ETSI][2] = 58, + [0][0][1][0][RTW89_MKK][2] = 76, + [0][0][1][0][RTW89_IC][2] = 70, + [0][0][1][0][RTW89_KCC][2] = 76, + [0][0][1][0][RTW89_ACMA][2] = 58, + [0][0][1][0][RTW89_CHILE][2] = 60, + [0][0][1][0][RTW89_UKRAINE][2] = 58, + [0][0][1][0][RTW89_MEXICO][2] = 70, + [0][0][1][0][RTW89_CN][2] = 58, + [0][0][1][0][RTW89_QATAR][2] = 58, + [0][0][1][0][RTW89_FCC][3] = 74, + [0][0][1][0][RTW89_ETSI][3] = 58, + [0][0][1][0][RTW89_MKK][3] = 76, + [0][0][1][0][RTW89_IC][3] = 74, + [0][0][1][0][RTW89_KCC][3] = 76, + [0][0][1][0][RTW89_ACMA][3] = 58, + [0][0][1][0][RTW89_CHILE][3] = 60, + [0][0][1][0][RTW89_UKRAINE][3] = 58, + [0][0][1][0][RTW89_MEXICO][3] = 74, + [0][0][1][0][RTW89_CN][3] = 58, + [0][0][1][0][RTW89_QATAR][3] = 58, + [0][0][1][0][RTW89_FCC][4] = 78, + [0][0][1][0][RTW89_ETSI][4] = 58, + [0][0][1][0][RTW89_MKK][4] = 76, + [0][0][1][0][RTW89_IC][4] = 78, + [0][0][1][0][RTW89_KCC][4] = 76, + [0][0][1][0][RTW89_ACMA][4] = 58, + [0][0][1][0][RTW89_CHILE][4] = 60, + [0][0][1][0][RTW89_UKRAINE][4] = 58, + [0][0][1][0][RTW89_MEXICO][4] = 78, + [0][0][1][0][RTW89_CN][4] = 58, + [0][0][1][0][RTW89_QATAR][4] = 58, + [0][0][1][0][RTW89_FCC][5] = 78, + [0][0][1][0][RTW89_ETSI][5] = 58, + [0][0][1][0][RTW89_MKK][5] = 76, + [0][0][1][0][RTW89_IC][5] = 78, + [0][0][1][0][RTW89_KCC][5] = 76, + [0][0][1][0][RTW89_ACMA][5] = 58, + [0][0][1][0][RTW89_CHILE][5] = 60, + [0][0][1][0][RTW89_UKRAINE][5] = 58, + [0][0][1][0][RTW89_MEXICO][5] = 78, + [0][0][1][0][RTW89_CN][5] = 58, + [0][0][1][0][RTW89_QATAR][5] = 58, + [0][0][1][0][RTW89_FCC][6] = 78, + [0][0][1][0][RTW89_ETSI][6] = 58, + [0][0][1][0][RTW89_MKK][6] = 76, + [0][0][1][0][RTW89_IC][6] = 78, + [0][0][1][0][RTW89_KCC][6] = 76, + [0][0][1][0][RTW89_ACMA][6] = 58, + [0][0][1][0][RTW89_CHILE][6] = 60, + [0][0][1][0][RTW89_UKRAINE][6] = 58, + [0][0][1][0][RTW89_MEXICO][6] = 78, + [0][0][1][0][RTW89_CN][6] = 58, + [0][0][1][0][RTW89_QATAR][6] = 58, + [0][0][1][0][RTW89_FCC][7] = 74, + [0][0][1][0][RTW89_ETSI][7] = 58, + [0][0][1][0][RTW89_MKK][7] = 76, + [0][0][1][0][RTW89_IC][7] = 74, + [0][0][1][0][RTW89_KCC][7] = 76, + [0][0][1][0][RTW89_ACMA][7] = 58, + [0][0][1][0][RTW89_CHILE][7] = 60, + [0][0][1][0][RTW89_UKRAINE][7] = 58, + [0][0][1][0][RTW89_MEXICO][7] = 74, + [0][0][1][0][RTW89_CN][7] = 58, + [0][0][1][0][RTW89_QATAR][7] = 58, + [0][0][1][0][RTW89_FCC][8] = 70, + [0][0][1][0][RTW89_ETSI][8] = 58, + [0][0][1][0][RTW89_MKK][8] = 76, + 
[0][0][1][0][RTW89_IC][8] = 70, + [0][0][1][0][RTW89_KCC][8] = 76, + [0][0][1][0][RTW89_ACMA][8] = 58, + [0][0][1][0][RTW89_CHILE][8] = 60, + [0][0][1][0][RTW89_UKRAINE][8] = 58, + [0][0][1][0][RTW89_MEXICO][8] = 70, + [0][0][1][0][RTW89_CN][8] = 58, + [0][0][1][0][RTW89_QATAR][8] = 58, + [0][0][1][0][RTW89_FCC][9] = 66, + [0][0][1][0][RTW89_ETSI][9] = 58, + [0][0][1][0][RTW89_MKK][9] = 76, + [0][0][1][0][RTW89_IC][9] = 66, + [0][0][1][0][RTW89_KCC][9] = 76, + [0][0][1][0][RTW89_ACMA][9] = 58, + [0][0][1][0][RTW89_CHILE][9] = 60, + [0][0][1][0][RTW89_UKRAINE][9] = 58, + [0][0][1][0][RTW89_MEXICO][9] = 66, + [0][0][1][0][RTW89_CN][9] = 58, + [0][0][1][0][RTW89_QATAR][9] = 58, + [0][0][1][0][RTW89_FCC][10] = 66, + [0][0][1][0][RTW89_ETSI][10] = 58, + [0][0][1][0][RTW89_MKK][10] = 76, + [0][0][1][0][RTW89_IC][10] = 66, + [0][0][1][0][RTW89_KCC][10] = 76, + [0][0][1][0][RTW89_ACMA][10] = 58, + [0][0][1][0][RTW89_CHILE][10] = 60, + [0][0][1][0][RTW89_UKRAINE][10] = 58, + [0][0][1][0][RTW89_MEXICO][10] = 66, + [0][0][1][0][RTW89_CN][10] = 58, + [0][0][1][0][RTW89_QATAR][10] = 58, + [0][0][1][0][RTW89_FCC][11] = 56, + [0][0][1][0][RTW89_ETSI][11] = 58, + [0][0][1][0][RTW89_MKK][11] = 76, + [0][0][1][0][RTW89_IC][11] = 56, + [0][0][1][0][RTW89_KCC][11] = 76, + [0][0][1][0][RTW89_ACMA][11] = 58, + [0][0][1][0][RTW89_CHILE][11] = 56, + [0][0][1][0][RTW89_UKRAINE][11] = 58, + [0][0][1][0][RTW89_MEXICO][11] = 56, + [0][0][1][0][RTW89_CN][11] = 58, + [0][0][1][0][RTW89_QATAR][11] = 58, + [0][0][1][0][RTW89_FCC][12] = 52, + [0][0][1][0][RTW89_ETSI][12] = 58, + [0][0][1][0][RTW89_MKK][12] = 76, + [0][0][1][0][RTW89_IC][12] = 52, + [0][0][1][0][RTW89_KCC][12] = 76, + [0][0][1][0][RTW89_ACMA][12] = 58, + [0][0][1][0][RTW89_CHILE][12] = 52, + [0][0][1][0][RTW89_UKRAINE][12] = 58, + [0][0][1][0][RTW89_MEXICO][12] = 52, + [0][0][1][0][RTW89_CN][12] = 58, + [0][0][1][0][RTW89_QATAR][12] = 58, + [0][0][1][0][RTW89_FCC][13] = 127, + [0][0][1][0][RTW89_ETSI][13] = 127, + [0][0][1][0][RTW89_MKK][13] = 127, + [0][0][1][0][RTW89_IC][13] = 127, + [0][0][1][0][RTW89_KCC][13] = 127, + [0][0][1][0][RTW89_ACMA][13] = 127, + [0][0][1][0][RTW89_CHILE][13] = 127, + [0][0][1][0][RTW89_UKRAINE][13] = 127, + [0][0][1][0][RTW89_MEXICO][13] = 127, + [0][0][1][0][RTW89_CN][13] = 127, + [0][0][1][0][RTW89_QATAR][13] = 127, + [0][1][1][0][RTW89_FCC][0] = 62, + [0][1][1][0][RTW89_ETSI][0] = 46, + [0][1][1][0][RTW89_MKK][0] = 64, + [0][1][1][0][RTW89_IC][0] = 62, + [0][1][1][0][RTW89_KCC][0] = 70, + [0][1][1][0][RTW89_ACMA][0] = 46, + [0][1][1][0][RTW89_CHILE][0] = 48, + [0][1][1][0][RTW89_UKRAINE][0] = 46, + [0][1][1][0][RTW89_MEXICO][0] = 62, + [0][1][1][0][RTW89_CN][0] = 46, + [0][1][1][0][RTW89_QATAR][0] = 46, + [0][1][1][0][RTW89_FCC][1] = 62, + [0][1][1][0][RTW89_ETSI][1] = 46, + [0][1][1][0][RTW89_MKK][1] = 64, + [0][1][1][0][RTW89_IC][1] = 62, + [0][1][1][0][RTW89_KCC][1] = 70, + [0][1][1][0][RTW89_ACMA][1] = 46, + [0][1][1][0][RTW89_CHILE][1] = 48, + [0][1][1][0][RTW89_UKRAINE][1] = 46, + [0][1][1][0][RTW89_MEXICO][1] = 62, + [0][1][1][0][RTW89_CN][1] = 46, + [0][1][1][0][RTW89_QATAR][1] = 46, + [0][1][1][0][RTW89_FCC][2] = 66, + [0][1][1][0][RTW89_ETSI][2] = 46, + [0][1][1][0][RTW89_MKK][2] = 64, + [0][1][1][0][RTW89_IC][2] = 66, + [0][1][1][0][RTW89_KCC][2] = 70, + [0][1][1][0][RTW89_ACMA][2] = 46, + [0][1][1][0][RTW89_CHILE][2] = 48, + [0][1][1][0][RTW89_UKRAINE][2] = 46, + [0][1][1][0][RTW89_MEXICO][2] = 66, + [0][1][1][0][RTW89_CN][2] = 46, + [0][1][1][0][RTW89_QATAR][2] = 46, + [0][1][1][0][RTW89_FCC][3] = 70, + 
[0][1][1][0][RTW89_ETSI][3] = 46, + [0][1][1][0][RTW89_MKK][3] = 64, + [0][1][1][0][RTW89_IC][3] = 70, + [0][1][1][0][RTW89_KCC][3] = 70, + [0][1][1][0][RTW89_ACMA][3] = 46, + [0][1][1][0][RTW89_CHILE][3] = 48, + [0][1][1][0][RTW89_UKRAINE][3] = 46, + [0][1][1][0][RTW89_MEXICO][3] = 70, + [0][1][1][0][RTW89_CN][3] = 46, + [0][1][1][0][RTW89_QATAR][3] = 46, + [0][1][1][0][RTW89_FCC][4] = 78, + [0][1][1][0][RTW89_ETSI][4] = 46, + [0][1][1][0][RTW89_MKK][4] = 64, + [0][1][1][0][RTW89_IC][4] = 78, + [0][1][1][0][RTW89_KCC][4] = 70, + [0][1][1][0][RTW89_ACMA][4] = 46, + [0][1][1][0][RTW89_CHILE][4] = 48, + [0][1][1][0][RTW89_UKRAINE][4] = 46, + [0][1][1][0][RTW89_MEXICO][4] = 78, + [0][1][1][0][RTW89_CN][4] = 46, + [0][1][1][0][RTW89_QATAR][4] = 46, + [0][1][1][0][RTW89_FCC][5] = 78, + [0][1][1][0][RTW89_ETSI][5] = 46, + [0][1][1][0][RTW89_MKK][5] = 64, + [0][1][1][0][RTW89_IC][5] = 78, + [0][1][1][0][RTW89_KCC][5] = 70, + [0][1][1][0][RTW89_ACMA][5] = 46, + [0][1][1][0][RTW89_CHILE][5] = 48, + [0][1][1][0][RTW89_UKRAINE][5] = 46, + [0][1][1][0][RTW89_MEXICO][5] = 78, + [0][1][1][0][RTW89_CN][5] = 46, + [0][1][1][0][RTW89_QATAR][5] = 46, + [0][1][1][0][RTW89_FCC][6] = 78, + [0][1][1][0][RTW89_ETSI][6] = 46, + [0][1][1][0][RTW89_MKK][6] = 64, + [0][1][1][0][RTW89_IC][6] = 78, + [0][1][1][0][RTW89_KCC][6] = 70, + [0][1][1][0][RTW89_ACMA][6] = 46, + [0][1][1][0][RTW89_CHILE][6] = 48, + [0][1][1][0][RTW89_UKRAINE][6] = 46, + [0][1][1][0][RTW89_MEXICO][6] = 78, + [0][1][1][0][RTW89_CN][6] = 46, + [0][1][1][0][RTW89_QATAR][6] = 46, + [0][1][1][0][RTW89_FCC][7] = 70, + [0][1][1][0][RTW89_ETSI][7] = 46, + [0][1][1][0][RTW89_MKK][7] = 64, + [0][1][1][0][RTW89_IC][7] = 70, + [0][1][1][0][RTW89_KCC][7] = 70, + [0][1][1][0][RTW89_ACMA][7] = 46, + [0][1][1][0][RTW89_CHILE][7] = 48, + [0][1][1][0][RTW89_UKRAINE][7] = 46, + [0][1][1][0][RTW89_MEXICO][7] = 70, + [0][1][1][0][RTW89_CN][7] = 46, + [0][1][1][0][RTW89_QATAR][7] = 46, + [0][1][1][0][RTW89_FCC][8] = 66, + [0][1][1][0][RTW89_ETSI][8] = 46, + [0][1][1][0][RTW89_MKK][8] = 64, + [0][1][1][0][RTW89_IC][8] = 66, + [0][1][1][0][RTW89_KCC][8] = 70, + [0][1][1][0][RTW89_ACMA][8] = 46, + [0][1][1][0][RTW89_CHILE][8] = 48, + [0][1][1][0][RTW89_UKRAINE][8] = 46, + [0][1][1][0][RTW89_MEXICO][8] = 66, + [0][1][1][0][RTW89_CN][8] = 46, + [0][1][1][0][RTW89_QATAR][8] = 46, + [0][1][1][0][RTW89_FCC][9] = 62, + [0][1][1][0][RTW89_ETSI][9] = 46, + [0][1][1][0][RTW89_MKK][9] = 64, + [0][1][1][0][RTW89_IC][9] = 62, + [0][1][1][0][RTW89_KCC][9] = 70, + [0][1][1][0][RTW89_ACMA][9] = 46, + [0][1][1][0][RTW89_CHILE][9] = 48, + [0][1][1][0][RTW89_UKRAINE][9] = 46, + [0][1][1][0][RTW89_MEXICO][9] = 62, + [0][1][1][0][RTW89_CN][9] = 46, + [0][1][1][0][RTW89_QATAR][9] = 46, + [0][1][1][0][RTW89_FCC][10] = 62, + [0][1][1][0][RTW89_ETSI][10] = 46, + [0][1][1][0][RTW89_MKK][10] = 64, + [0][1][1][0][RTW89_IC][10] = 62, + [0][1][1][0][RTW89_KCC][10] = 70, + [0][1][1][0][RTW89_ACMA][10] = 46, + [0][1][1][0][RTW89_CHILE][10] = 48, + [0][1][1][0][RTW89_UKRAINE][10] = 46, + [0][1][1][0][RTW89_MEXICO][10] = 62, + [0][1][1][0][RTW89_CN][10] = 46, + [0][1][1][0][RTW89_QATAR][10] = 46, + [0][1][1][0][RTW89_FCC][11] = 42, + [0][1][1][0][RTW89_ETSI][11] = 46, + [0][1][1][0][RTW89_MKK][11] = 64, + [0][1][1][0][RTW89_IC][11] = 42, + [0][1][1][0][RTW89_KCC][11] = 70, + [0][1][1][0][RTW89_ACMA][11] = 46, + [0][1][1][0][RTW89_CHILE][11] = 42, + [0][1][1][0][RTW89_UKRAINE][11] = 46, + [0][1][1][0][RTW89_MEXICO][11] = 42, + [0][1][1][0][RTW89_CN][11] = 46, + [0][1][1][0][RTW89_QATAR][11] = 46, + 
[0][1][1][0][RTW89_FCC][12] = 40, + [0][1][1][0][RTW89_ETSI][12] = 46, + [0][1][1][0][RTW89_MKK][12] = 64, + [0][1][1][0][RTW89_IC][12] = 40, + [0][1][1][0][RTW89_KCC][12] = 70, + [0][1][1][0][RTW89_ACMA][12] = 46, + [0][1][1][0][RTW89_CHILE][12] = 40, + [0][1][1][0][RTW89_UKRAINE][12] = 46, + [0][1][1][0][RTW89_MEXICO][12] = 40, + [0][1][1][0][RTW89_CN][12] = 46, + [0][1][1][0][RTW89_QATAR][12] = 46, + [0][1][1][0][RTW89_FCC][13] = 127, + [0][1][1][0][RTW89_ETSI][13] = 127, + [0][1][1][0][RTW89_MKK][13] = 127, + [0][1][1][0][RTW89_IC][13] = 127, + [0][1][1][0][RTW89_KCC][13] = 127, + [0][1][1][0][RTW89_ACMA][13] = 127, + [0][1][1][0][RTW89_CHILE][13] = 127, + [0][1][1][0][RTW89_UKRAINE][13] = 127, + [0][1][1][0][RTW89_MEXICO][13] = 127, + [0][1][1][0][RTW89_CN][13] = 127, + [0][1][1][0][RTW89_QATAR][13] = 127, + [0][0][2][0][RTW89_FCC][0] = 66, + [0][0][2][0][RTW89_ETSI][0] = 58, + [0][0][2][0][RTW89_MKK][0] = 76, + [0][0][2][0][RTW89_IC][0] = 66, + [0][0][2][0][RTW89_KCC][0] = 76, + [0][0][2][0][RTW89_ACMA][0] = 58, + [0][0][2][0][RTW89_CHILE][0] = 60, + [0][0][2][0][RTW89_UKRAINE][0] = 58, + [0][0][2][0][RTW89_MEXICO][0] = 66, + [0][0][2][0][RTW89_CN][0] = 58, + [0][0][2][0][RTW89_QATAR][0] = 58, + [0][0][2][0][RTW89_FCC][1] = 66, + [0][0][2][0][RTW89_ETSI][1] = 58, + [0][0][2][0][RTW89_MKK][1] = 76, + [0][0][2][0][RTW89_IC][1] = 66, + [0][0][2][0][RTW89_KCC][1] = 76, + [0][0][2][0][RTW89_ACMA][1] = 58, + [0][0][2][0][RTW89_CHILE][1] = 60, + [0][0][2][0][RTW89_UKRAINE][1] = 58, + [0][0][2][0][RTW89_MEXICO][1] = 66, + [0][0][2][0][RTW89_CN][1] = 58, + [0][0][2][0][RTW89_QATAR][1] = 58, + [0][0][2][0][RTW89_FCC][2] = 70, + [0][0][2][0][RTW89_ETSI][2] = 58, + [0][0][2][0][RTW89_MKK][2] = 76, + [0][0][2][0][RTW89_IC][2] = 70, + [0][0][2][0][RTW89_KCC][2] = 76, + [0][0][2][0][RTW89_ACMA][2] = 58, + [0][0][2][0][RTW89_CHILE][2] = 60, + [0][0][2][0][RTW89_UKRAINE][2] = 58, + [0][0][2][0][RTW89_MEXICO][2] = 70, + [0][0][2][0][RTW89_CN][2] = 58, + [0][0][2][0][RTW89_QATAR][2] = 58, + [0][0][2][0][RTW89_FCC][3] = 74, + [0][0][2][0][RTW89_ETSI][3] = 58, + [0][0][2][0][RTW89_MKK][3] = 76, + [0][0][2][0][RTW89_IC][3] = 74, + [0][0][2][0][RTW89_KCC][3] = 76, + [0][0][2][0][RTW89_ACMA][3] = 58, + [0][0][2][0][RTW89_CHILE][3] = 60, + [0][0][2][0][RTW89_UKRAINE][3] = 58, + [0][0][2][0][RTW89_MEXICO][3] = 74, + [0][0][2][0][RTW89_CN][3] = 58, + [0][0][2][0][RTW89_QATAR][3] = 58, + [0][0][2][0][RTW89_FCC][4] = 76, + [0][0][2][0][RTW89_ETSI][4] = 58, + [0][0][2][0][RTW89_MKK][4] = 76, + [0][0][2][0][RTW89_IC][4] = 76, + [0][0][2][0][RTW89_KCC][4] = 76, + [0][0][2][0][RTW89_ACMA][4] = 58, + [0][0][2][0][RTW89_CHILE][4] = 60, + [0][0][2][0][RTW89_UKRAINE][4] = 58, + [0][0][2][0][RTW89_MEXICO][4] = 76, + [0][0][2][0][RTW89_CN][4] = 58, + [0][0][2][0][RTW89_QATAR][4] = 58, + [0][0][2][0][RTW89_FCC][5] = 76, + [0][0][2][0][RTW89_ETSI][5] = 58, + [0][0][2][0][RTW89_MKK][5] = 76, + [0][0][2][0][RTW89_IC][5] = 76, + [0][0][2][0][RTW89_KCC][5] = 76, + [0][0][2][0][RTW89_ACMA][5] = 58, + [0][0][2][0][RTW89_CHILE][5] = 60, + [0][0][2][0][RTW89_UKRAINE][5] = 58, + [0][0][2][0][RTW89_MEXICO][5] = 76, + [0][0][2][0][RTW89_CN][5] = 58, + [0][0][2][0][RTW89_QATAR][5] = 58, + [0][0][2][0][RTW89_FCC][6] = 76, + [0][0][2][0][RTW89_ETSI][6] = 58, + [0][0][2][0][RTW89_MKK][6] = 76, + [0][0][2][0][RTW89_IC][6] = 76, + [0][0][2][0][RTW89_KCC][6] = 76, + [0][0][2][0][RTW89_ACMA][6] = 58, + [0][0][2][0][RTW89_CHILE][6] = 60, + [0][0][2][0][RTW89_UKRAINE][6] = 58, + [0][0][2][0][RTW89_MEXICO][6] = 76, + [0][0][2][0][RTW89_CN][6] = 
58, + [0][0][2][0][RTW89_QATAR][6] = 58, + [0][0][2][0][RTW89_FCC][7] = 74, + [0][0][2][0][RTW89_ETSI][7] = 58, + [0][0][2][0][RTW89_MKK][7] = 76, + [0][0][2][0][RTW89_IC][7] = 74, + [0][0][2][0][RTW89_KCC][7] = 76, + [0][0][2][0][RTW89_ACMA][7] = 58, + [0][0][2][0][RTW89_CHILE][7] = 60, + [0][0][2][0][RTW89_UKRAINE][7] = 58, + [0][0][2][0][RTW89_MEXICO][7] = 74, + [0][0][2][0][RTW89_CN][7] = 58, + [0][0][2][0][RTW89_QATAR][7] = 58, + [0][0][2][0][RTW89_FCC][8] = 70, + [0][0][2][0][RTW89_ETSI][8] = 58, + [0][0][2][0][RTW89_MKK][8] = 76, + [0][0][2][0][RTW89_IC][8] = 70, + [0][0][2][0][RTW89_KCC][8] = 76, + [0][0][2][0][RTW89_ACMA][8] = 58, + [0][0][2][0][RTW89_CHILE][8] = 60, + [0][0][2][0][RTW89_UKRAINE][8] = 58, + [0][0][2][0][RTW89_MEXICO][8] = 70, + [0][0][2][0][RTW89_CN][8] = 58, + [0][0][2][0][RTW89_QATAR][8] = 58, + [0][0][2][0][RTW89_FCC][9] = 66, + [0][0][2][0][RTW89_ETSI][9] = 58, + [0][0][2][0][RTW89_MKK][9] = 76, + [0][0][2][0][RTW89_IC][9] = 66, + [0][0][2][0][RTW89_KCC][9] = 76, + [0][0][2][0][RTW89_ACMA][9] = 58, + [0][0][2][0][RTW89_CHILE][9] = 60, + [0][0][2][0][RTW89_UKRAINE][9] = 58, + [0][0][2][0][RTW89_MEXICO][9] = 66, + [0][0][2][0][RTW89_CN][9] = 58, + [0][0][2][0][RTW89_QATAR][9] = 58, + [0][0][2][0][RTW89_FCC][10] = 66, + [0][0][2][0][RTW89_ETSI][10] = 58, + [0][0][2][0][RTW89_MKK][10] = 76, + [0][0][2][0][RTW89_IC][10] = 66, + [0][0][2][0][RTW89_KCC][10] = 76, + [0][0][2][0][RTW89_ACMA][10] = 58, + [0][0][2][0][RTW89_CHILE][10] = 60, + [0][0][2][0][RTW89_UKRAINE][10] = 58, + [0][0][2][0][RTW89_MEXICO][10] = 66, + [0][0][2][0][RTW89_CN][10] = 58, + [0][0][2][0][RTW89_QATAR][10] = 58, + [0][0][2][0][RTW89_FCC][11] = 54, + [0][0][2][0][RTW89_ETSI][11] = 58, + [0][0][2][0][RTW89_MKK][11] = 76, + [0][0][2][0][RTW89_IC][11] = 54, + [0][0][2][0][RTW89_KCC][11] = 76, + [0][0][2][0][RTW89_ACMA][11] = 58, + [0][0][2][0][RTW89_CHILE][11] = 54, + [0][0][2][0][RTW89_UKRAINE][11] = 58, + [0][0][2][0][RTW89_MEXICO][11] = 54, + [0][0][2][0][RTW89_CN][11] = 58, + [0][0][2][0][RTW89_QATAR][11] = 58, + [0][0][2][0][RTW89_FCC][12] = 50, + [0][0][2][0][RTW89_ETSI][12] = 58, + [0][0][2][0][RTW89_MKK][12] = 76, + [0][0][2][0][RTW89_IC][12] = 50, + [0][0][2][0][RTW89_KCC][12] = 74, + [0][0][2][0][RTW89_ACMA][12] = 58, + [0][0][2][0][RTW89_CHILE][12] = 50, + [0][0][2][0][RTW89_UKRAINE][12] = 58, + [0][0][2][0][RTW89_MEXICO][12] = 50, + [0][0][2][0][RTW89_CN][12] = 58, + [0][0][2][0][RTW89_QATAR][12] = 58, + [0][0][2][0][RTW89_FCC][13] = 127, + [0][0][2][0][RTW89_ETSI][13] = 127, + [0][0][2][0][RTW89_MKK][13] = 127, + [0][0][2][0][RTW89_IC][13] = 127, + [0][0][2][0][RTW89_KCC][13] = 127, + [0][0][2][0][RTW89_ACMA][13] = 127, + [0][0][2][0][RTW89_CHILE][13] = 127, + [0][0][2][0][RTW89_UKRAINE][13] = 127, + [0][0][2][0][RTW89_MEXICO][13] = 127, + [0][0][2][0][RTW89_CN][13] = 127, + [0][0][2][0][RTW89_QATAR][13] = 127, + [0][1][2][0][RTW89_FCC][0] = 62, + [0][1][2][0][RTW89_ETSI][0] = 46, + [0][1][2][0][RTW89_MKK][0] = 64, + [0][1][2][0][RTW89_IC][0] = 62, + [0][1][2][0][RTW89_KCC][0] = 68, + [0][1][2][0][RTW89_ACMA][0] = 46, + [0][1][2][0][RTW89_CHILE][0] = 48, + [0][1][2][0][RTW89_UKRAINE][0] = 46, + [0][1][2][0][RTW89_MEXICO][0] = 62, + [0][1][2][0][RTW89_CN][0] = 46, + [0][1][2][0][RTW89_QATAR][0] = 46, + [0][1][2][0][RTW89_FCC][1] = 62, + [0][1][2][0][RTW89_ETSI][1] = 46, + [0][1][2][0][RTW89_MKK][1] = 64, + [0][1][2][0][RTW89_IC][1] = 62, + [0][1][2][0][RTW89_KCC][1] = 70, + [0][1][2][0][RTW89_ACMA][1] = 46, + [0][1][2][0][RTW89_CHILE][1] = 48, + [0][1][2][0][RTW89_UKRAINE][1] = 46, + 
[0][1][2][0][RTW89_MEXICO][1] = 62, + [0][1][2][0][RTW89_CN][1] = 46, + [0][1][2][0][RTW89_QATAR][1] = 46, + [0][1][2][0][RTW89_FCC][2] = 66, + [0][1][2][0][RTW89_ETSI][2] = 46, + [0][1][2][0][RTW89_MKK][2] = 64, + [0][1][2][0][RTW89_IC][2] = 66, + [0][1][2][0][RTW89_KCC][2] = 70, + [0][1][2][0][RTW89_ACMA][2] = 46, + [0][1][2][0][RTW89_CHILE][2] = 48, + [0][1][2][0][RTW89_UKRAINE][2] = 46, + [0][1][2][0][RTW89_MEXICO][2] = 66, + [0][1][2][0][RTW89_CN][2] = 46, + [0][1][2][0][RTW89_QATAR][2] = 46, + [0][1][2][0][RTW89_FCC][3] = 70, + [0][1][2][0][RTW89_ETSI][3] = 46, + [0][1][2][0][RTW89_MKK][3] = 64, + [0][1][2][0][RTW89_IC][3] = 70, + [0][1][2][0][RTW89_KCC][3] = 70, + [0][1][2][0][RTW89_ACMA][3] = 46, + [0][1][2][0][RTW89_CHILE][3] = 48, + [0][1][2][0][RTW89_UKRAINE][3] = 46, + [0][1][2][0][RTW89_MEXICO][3] = 70, + [0][1][2][0][RTW89_CN][3] = 46, + [0][1][2][0][RTW89_QATAR][3] = 46, + [0][1][2][0][RTW89_FCC][4] = 76, + [0][1][2][0][RTW89_ETSI][4] = 46, + [0][1][2][0][RTW89_MKK][4] = 64, + [0][1][2][0][RTW89_IC][4] = 76, + [0][1][2][0][RTW89_KCC][4] = 70, + [0][1][2][0][RTW89_ACMA][4] = 46, + [0][1][2][0][RTW89_CHILE][4] = 48, + [0][1][2][0][RTW89_UKRAINE][4] = 46, + [0][1][2][0][RTW89_MEXICO][4] = 76, + [0][1][2][0][RTW89_CN][4] = 46, + [0][1][2][0][RTW89_QATAR][4] = 46, + [0][1][2][0][RTW89_FCC][5] = 76, + [0][1][2][0][RTW89_ETSI][5] = 46, + [0][1][2][0][RTW89_MKK][5] = 64, + [0][1][2][0][RTW89_IC][5] = 76, + [0][1][2][0][RTW89_KCC][5] = 70, + [0][1][2][0][RTW89_ACMA][5] = 46, + [0][1][2][0][RTW89_CHILE][5] = 48, + [0][1][2][0][RTW89_UKRAINE][5] = 46, + [0][1][2][0][RTW89_MEXICO][5] = 76, + [0][1][2][0][RTW89_CN][5] = 46, + [0][1][2][0][RTW89_QATAR][5] = 46, + [0][1][2][0][RTW89_FCC][6] = 76, + [0][1][2][0][RTW89_ETSI][6] = 46, + [0][1][2][0][RTW89_MKK][6] = 64, + [0][1][2][0][RTW89_IC][6] = 76, + [0][1][2][0][RTW89_KCC][6] = 70, + [0][1][2][0][RTW89_ACMA][6] = 46, + [0][1][2][0][RTW89_CHILE][6] = 48, + [0][1][2][0][RTW89_UKRAINE][6] = 46, + [0][1][2][0][RTW89_MEXICO][6] = 76, + [0][1][2][0][RTW89_CN][6] = 46, + [0][1][2][0][RTW89_QATAR][6] = 46, + [0][1][2][0][RTW89_FCC][7] = 68, + [0][1][2][0][RTW89_ETSI][7] = 46, + [0][1][2][0][RTW89_MKK][7] = 64, + [0][1][2][0][RTW89_IC][7] = 68, + [0][1][2][0][RTW89_KCC][7] = 70, + [0][1][2][0][RTW89_ACMA][7] = 46, + [0][1][2][0][RTW89_CHILE][7] = 48, + [0][1][2][0][RTW89_UKRAINE][7] = 46, + [0][1][2][0][RTW89_MEXICO][7] = 68, + [0][1][2][0][RTW89_CN][7] = 46, + [0][1][2][0][RTW89_QATAR][7] = 46, + [0][1][2][0][RTW89_FCC][8] = 64, + [0][1][2][0][RTW89_ETSI][8] = 46, + [0][1][2][0][RTW89_MKK][8] = 64, + [0][1][2][0][RTW89_IC][8] = 64, + [0][1][2][0][RTW89_KCC][8] = 70, + [0][1][2][0][RTW89_ACMA][8] = 46, + [0][1][2][0][RTW89_CHILE][8] = 48, + [0][1][2][0][RTW89_UKRAINE][8] = 46, + [0][1][2][0][RTW89_MEXICO][8] = 64, + [0][1][2][0][RTW89_CN][8] = 46, + [0][1][2][0][RTW89_QATAR][8] = 46, + [0][1][2][0][RTW89_FCC][9] = 60, + [0][1][2][0][RTW89_ETSI][9] = 46, + [0][1][2][0][RTW89_MKK][9] = 64, + [0][1][2][0][RTW89_IC][9] = 60, + [0][1][2][0][RTW89_KCC][9] = 70, + [0][1][2][0][RTW89_ACMA][9] = 46, + [0][1][2][0][RTW89_CHILE][9] = 48, + [0][1][2][0][RTW89_UKRAINE][9] = 46, + [0][1][2][0][RTW89_MEXICO][9] = 60, + [0][1][2][0][RTW89_CN][9] = 46, + [0][1][2][0][RTW89_QATAR][9] = 46, + [0][1][2][0][RTW89_FCC][10] = 60, + [0][1][2][0][RTW89_ETSI][10] = 46, + [0][1][2][0][RTW89_MKK][10] = 64, + [0][1][2][0][RTW89_IC][10] = 60, + [0][1][2][0][RTW89_KCC][10] = 70, + [0][1][2][0][RTW89_ACMA][10] = 46, + [0][1][2][0][RTW89_CHILE][10] = 48, + 
[0][1][2][0][RTW89_UKRAINE][10] = 46, + [0][1][2][0][RTW89_MEXICO][10] = 60, + [0][1][2][0][RTW89_CN][10] = 46, + [0][1][2][0][RTW89_QATAR][10] = 46, + [0][1][2][0][RTW89_FCC][11] = 42, + [0][1][2][0][RTW89_ETSI][11] = 46, + [0][1][2][0][RTW89_MKK][11] = 64, + [0][1][2][0][RTW89_IC][11] = 42, + [0][1][2][0][RTW89_KCC][11] = 70, + [0][1][2][0][RTW89_ACMA][11] = 46, + [0][1][2][0][RTW89_CHILE][11] = 42, + [0][1][2][0][RTW89_UKRAINE][11] = 46, + [0][1][2][0][RTW89_MEXICO][11] = 42, + [0][1][2][0][RTW89_CN][11] = 46, + [0][1][2][0][RTW89_QATAR][11] = 46, + [0][1][2][0][RTW89_FCC][12] = 40, + [0][1][2][0][RTW89_ETSI][12] = 46, + [0][1][2][0][RTW89_MKK][12] = 64, + [0][1][2][0][RTW89_IC][12] = 40, + [0][1][2][0][RTW89_KCC][12] = 68, + [0][1][2][0][RTW89_ACMA][12] = 46, + [0][1][2][0][RTW89_CHILE][12] = 40, + [0][1][2][0][RTW89_UKRAINE][12] = 46, + [0][1][2][0][RTW89_MEXICO][12] = 40, + [0][1][2][0][RTW89_CN][12] = 46, + [0][1][2][0][RTW89_QATAR][12] = 46, + [0][1][2][0][RTW89_FCC][13] = 127, + [0][1][2][0][RTW89_ETSI][13] = 127, + [0][1][2][0][RTW89_MKK][13] = 127, + [0][1][2][0][RTW89_IC][13] = 127, + [0][1][2][0][RTW89_KCC][13] = 127, + [0][1][2][0][RTW89_ACMA][13] = 127, + [0][1][2][0][RTW89_CHILE][13] = 127, + [0][1][2][0][RTW89_UKRAINE][13] = 127, + [0][1][2][0][RTW89_MEXICO][13] = 127, + [0][1][2][0][RTW89_CN][13] = 127, + [0][1][2][0][RTW89_QATAR][13] = 127, + [0][1][2][1][RTW89_FCC][0] = 62, + [0][1][2][1][RTW89_ETSI][0] = 34, + [0][1][2][1][RTW89_MKK][0] = 64, + [0][1][2][1][RTW89_IC][0] = 62, + [0][1][2][1][RTW89_KCC][0] = 68, + [0][1][2][1][RTW89_ACMA][0] = 34, + [0][1][2][1][RTW89_CHILE][0] = 36, + [0][1][2][1][RTW89_UKRAINE][0] = 34, + [0][1][2][1][RTW89_MEXICO][0] = 62, + [0][1][2][1][RTW89_CN][0] = 34, + [0][1][2][1][RTW89_QATAR][0] = 34, + [0][1][2][1][RTW89_FCC][1] = 62, + [0][1][2][1][RTW89_ETSI][1] = 34, + [0][1][2][1][RTW89_MKK][1] = 64, + [0][1][2][1][RTW89_IC][1] = 62, + [0][1][2][1][RTW89_KCC][1] = 70, + [0][1][2][1][RTW89_ACMA][1] = 34, + [0][1][2][1][RTW89_CHILE][1] = 36, + [0][1][2][1][RTW89_UKRAINE][1] = 34, + [0][1][2][1][RTW89_MEXICO][1] = 62, + [0][1][2][1][RTW89_CN][1] = 34, + [0][1][2][1][RTW89_QATAR][1] = 34, + [0][1][2][1][RTW89_FCC][2] = 66, + [0][1][2][1][RTW89_ETSI][2] = 34, + [0][1][2][1][RTW89_MKK][2] = 64, + [0][1][2][1][RTW89_IC][2] = 66, + [0][1][2][1][RTW89_KCC][2] = 70, + [0][1][2][1][RTW89_ACMA][2] = 34, + [0][1][2][1][RTW89_CHILE][2] = 36, + [0][1][2][1][RTW89_UKRAINE][2] = 34, + [0][1][2][1][RTW89_MEXICO][2] = 66, + [0][1][2][1][RTW89_CN][2] = 34, + [0][1][2][1][RTW89_QATAR][2] = 34, + [0][1][2][1][RTW89_FCC][3] = 70, + [0][1][2][1][RTW89_ETSI][3] = 34, + [0][1][2][1][RTW89_MKK][3] = 64, + [0][1][2][1][RTW89_IC][3] = 70, + [0][1][2][1][RTW89_KCC][3] = 70, + [0][1][2][1][RTW89_ACMA][3] = 34, + [0][1][2][1][RTW89_CHILE][3] = 36, + [0][1][2][1][RTW89_UKRAINE][3] = 34, + [0][1][2][1][RTW89_MEXICO][3] = 70, + [0][1][2][1][RTW89_CN][3] = 34, + [0][1][2][1][RTW89_QATAR][3] = 34, + [0][1][2][1][RTW89_FCC][4] = 76, + [0][1][2][1][RTW89_ETSI][4] = 34, + [0][1][2][1][RTW89_MKK][4] = 64, + [0][1][2][1][RTW89_IC][4] = 76, + [0][1][2][1][RTW89_KCC][4] = 70, + [0][1][2][1][RTW89_ACMA][4] = 34, + [0][1][2][1][RTW89_CHILE][4] = 36, + [0][1][2][1][RTW89_UKRAINE][4] = 34, + [0][1][2][1][RTW89_MEXICO][4] = 76, + [0][1][2][1][RTW89_CN][4] = 34, + [0][1][2][1][RTW89_QATAR][4] = 34, + [0][1][2][1][RTW89_FCC][5] = 76, + [0][1][2][1][RTW89_ETSI][5] = 34, + [0][1][2][1][RTW89_MKK][5] = 64, + [0][1][2][1][RTW89_IC][5] = 76, + [0][1][2][1][RTW89_KCC][5] = 70, + 
[0][1][2][1][RTW89_ACMA][5] = 34, + [0][1][2][1][RTW89_CHILE][5] = 36, + [0][1][2][1][RTW89_UKRAINE][5] = 34, + [0][1][2][1][RTW89_MEXICO][5] = 76, + [0][1][2][1][RTW89_CN][5] = 34, + [0][1][2][1][RTW89_QATAR][5] = 34, + [0][1][2][1][RTW89_FCC][6] = 76, + [0][1][2][1][RTW89_ETSI][6] = 34, + [0][1][2][1][RTW89_MKK][6] = 64, + [0][1][2][1][RTW89_IC][6] = 76, + [0][1][2][1][RTW89_KCC][6] = 70, + [0][1][2][1][RTW89_ACMA][6] = 34, + [0][1][2][1][RTW89_CHILE][6] = 36, + [0][1][2][1][RTW89_UKRAINE][6] = 34, + [0][1][2][1][RTW89_MEXICO][6] = 76, + [0][1][2][1][RTW89_CN][6] = 34, + [0][1][2][1][RTW89_QATAR][6] = 34, + [0][1][2][1][RTW89_FCC][7] = 68, + [0][1][2][1][RTW89_ETSI][7] = 34, + [0][1][2][1][RTW89_MKK][7] = 64, + [0][1][2][1][RTW89_IC][7] = 68, + [0][1][2][1][RTW89_KCC][7] = 70, + [0][1][2][1][RTW89_ACMA][7] = 34, + [0][1][2][1][RTW89_CHILE][7] = 36, + [0][1][2][1][RTW89_UKRAINE][7] = 34, + [0][1][2][1][RTW89_MEXICO][7] = 68, + [0][1][2][1][RTW89_CN][7] = 34, + [0][1][2][1][RTW89_QATAR][7] = 34, + [0][1][2][1][RTW89_FCC][8] = 64, + [0][1][2][1][RTW89_ETSI][8] = 34, + [0][1][2][1][RTW89_MKK][8] = 64, + [0][1][2][1][RTW89_IC][8] = 64, + [0][1][2][1][RTW89_KCC][8] = 70, + [0][1][2][1][RTW89_ACMA][8] = 34, + [0][1][2][1][RTW89_CHILE][8] = 36, + [0][1][2][1][RTW89_UKRAINE][8] = 34, + [0][1][2][1][RTW89_MEXICO][8] = 64, + [0][1][2][1][RTW89_CN][8] = 34, + [0][1][2][1][RTW89_QATAR][8] = 34, + [0][1][2][1][RTW89_FCC][9] = 60, + [0][1][2][1][RTW89_ETSI][9] = 34, + [0][1][2][1][RTW89_MKK][9] = 64, + [0][1][2][1][RTW89_IC][9] = 60, + [0][1][2][1][RTW89_KCC][9] = 70, + [0][1][2][1][RTW89_ACMA][9] = 34, + [0][1][2][1][RTW89_CHILE][9] = 36, + [0][1][2][1][RTW89_UKRAINE][9] = 34, + [0][1][2][1][RTW89_MEXICO][9] = 60, + [0][1][2][1][RTW89_CN][9] = 34, + [0][1][2][1][RTW89_QATAR][9] = 34, + [0][1][2][1][RTW89_FCC][10] = 60, + [0][1][2][1][RTW89_ETSI][10] = 34, + [0][1][2][1][RTW89_MKK][10] = 64, + [0][1][2][1][RTW89_IC][10] = 60, + [0][1][2][1][RTW89_KCC][10] = 70, + [0][1][2][1][RTW89_ACMA][10] = 34, + [0][1][2][1][RTW89_CHILE][10] = 36, + [0][1][2][1][RTW89_UKRAINE][10] = 34, + [0][1][2][1][RTW89_MEXICO][10] = 60, + [0][1][2][1][RTW89_CN][10] = 34, + [0][1][2][1][RTW89_QATAR][10] = 34, + [0][1][2][1][RTW89_FCC][11] = 42, + [0][1][2][1][RTW89_ETSI][11] = 34, + [0][1][2][1][RTW89_MKK][11] = 64, + [0][1][2][1][RTW89_IC][11] = 42, + [0][1][2][1][RTW89_KCC][11] = 70, + [0][1][2][1][RTW89_ACMA][11] = 34, + [0][1][2][1][RTW89_CHILE][11] = 36, + [0][1][2][1][RTW89_UKRAINE][11] = 34, + [0][1][2][1][RTW89_MEXICO][11] = 42, + [0][1][2][1][RTW89_CN][11] = 34, + [0][1][2][1][RTW89_QATAR][11] = 34, + [0][1][2][1][RTW89_FCC][12] = 40, + [0][1][2][1][RTW89_ETSI][12] = 34, + [0][1][2][1][RTW89_MKK][12] = 64, + [0][1][2][1][RTW89_IC][12] = 40, + [0][1][2][1][RTW89_KCC][12] = 68, + [0][1][2][1][RTW89_ACMA][12] = 34, + [0][1][2][1][RTW89_CHILE][12] = 36, + [0][1][2][1][RTW89_UKRAINE][12] = 34, + [0][1][2][1][RTW89_MEXICO][12] = 40, + [0][1][2][1][RTW89_CN][12] = 34, + [0][1][2][1][RTW89_QATAR][12] = 34, + [0][1][2][1][RTW89_FCC][13] = 127, + [0][1][2][1][RTW89_ETSI][13] = 127, + [0][1][2][1][RTW89_MKK][13] = 127, + [0][1][2][1][RTW89_IC][13] = 127, + [0][1][2][1][RTW89_KCC][13] = 127, + [0][1][2][1][RTW89_ACMA][13] = 127, + [0][1][2][1][RTW89_CHILE][13] = 127, + [0][1][2][1][RTW89_UKRAINE][13] = 127, + [0][1][2][1][RTW89_MEXICO][13] = 127, + [0][1][2][1][RTW89_CN][13] = 127, + [0][1][2][1][RTW89_QATAR][13] = 127, + [1][0][2][0][RTW89_FCC][0] = 127, + [1][0][2][0][RTW89_ETSI][0] = 127, + [1][0][2][0][RTW89_MKK][0] = 127, + 
[1][0][2][0][RTW89_IC][0] = 127, + [1][0][2][0][RTW89_KCC][0] = 127, + [1][0][2][0][RTW89_ACMA][0] = 127, + [1][0][2][0][RTW89_CHILE][0] = 127, + [1][0][2][0][RTW89_UKRAINE][0] = 127, + [1][0][2][0][RTW89_MEXICO][0] = 127, + [1][0][2][0][RTW89_CN][0] = 127, + [1][0][2][0][RTW89_QATAR][0] = 127, + [1][0][2][0][RTW89_FCC][1] = 127, + [1][0][2][0][RTW89_ETSI][1] = 127, + [1][0][2][0][RTW89_MKK][1] = 127, + [1][0][2][0][RTW89_IC][1] = 127, + [1][0][2][0][RTW89_KCC][1] = 127, + [1][0][2][0][RTW89_ACMA][1] = 127, + [1][0][2][0][RTW89_CHILE][1] = 127, + [1][0][2][0][RTW89_UKRAINE][1] = 127, + [1][0][2][0][RTW89_MEXICO][1] = 127, + [1][0][2][0][RTW89_CN][1] = 127, + [1][0][2][0][RTW89_QATAR][1] = 127, + [1][0][2][0][RTW89_FCC][2] = 56, + [1][0][2][0][RTW89_ETSI][2] = 58, + [1][0][2][0][RTW89_MKK][2] = 68, + [1][0][2][0][RTW89_IC][2] = 56, + [1][0][2][0][RTW89_KCC][2] = 68, + [1][0][2][0][RTW89_ACMA][2] = 58, + [1][0][2][0][RTW89_CHILE][2] = 56, + [1][0][2][0][RTW89_UKRAINE][2] = 58, + [1][0][2][0][RTW89_MEXICO][2] = 56, + [1][0][2][0][RTW89_CN][2] = 58, + [1][0][2][0][RTW89_QATAR][2] = 58, + [1][0][2][0][RTW89_FCC][3] = 56, + [1][0][2][0][RTW89_ETSI][3] = 58, + [1][0][2][0][RTW89_MKK][3] = 68, + [1][0][2][0][RTW89_IC][3] = 56, + [1][0][2][0][RTW89_KCC][3] = 68, + [1][0][2][0][RTW89_ACMA][3] = 58, + [1][0][2][0][RTW89_CHILE][3] = 56, + [1][0][2][0][RTW89_UKRAINE][3] = 58, + [1][0][2][0][RTW89_MEXICO][3] = 56, + [1][0][2][0][RTW89_CN][3] = 58, + [1][0][2][0][RTW89_QATAR][3] = 58, + [1][0][2][0][RTW89_FCC][4] = 60, + [1][0][2][0][RTW89_ETSI][4] = 58, + [1][0][2][0][RTW89_MKK][4] = 68, + [1][0][2][0][RTW89_IC][4] = 60, + [1][0][2][0][RTW89_KCC][4] = 68, + [1][0][2][0][RTW89_ACMA][4] = 58, + [1][0][2][0][RTW89_CHILE][4] = 60, + [1][0][2][0][RTW89_UKRAINE][4] = 58, + [1][0][2][0][RTW89_MEXICO][4] = 60, + [1][0][2][0][RTW89_CN][4] = 58, + [1][0][2][0][RTW89_QATAR][4] = 58, + [1][0][2][0][RTW89_FCC][5] = 64, + [1][0][2][0][RTW89_ETSI][5] = 58, + [1][0][2][0][RTW89_MKK][5] = 68, + [1][0][2][0][RTW89_IC][5] = 64, + [1][0][2][0][RTW89_KCC][5] = 68, + [1][0][2][0][RTW89_ACMA][5] = 58, + [1][0][2][0][RTW89_CHILE][5] = 60, + [1][0][2][0][RTW89_UKRAINE][5] = 58, + [1][0][2][0][RTW89_MEXICO][5] = 64, + [1][0][2][0][RTW89_CN][5] = 58, + [1][0][2][0][RTW89_QATAR][5] = 58, + [1][0][2][0][RTW89_FCC][6] = 54, + [1][0][2][0][RTW89_ETSI][6] = 58, + [1][0][2][0][RTW89_MKK][6] = 68, + [1][0][2][0][RTW89_IC][6] = 54, + [1][0][2][0][RTW89_KCC][6] = 68, + [1][0][2][0][RTW89_ACMA][6] = 58, + [1][0][2][0][RTW89_CHILE][6] = 54, + [1][0][2][0][RTW89_UKRAINE][6] = 58, + [1][0][2][0][RTW89_MEXICO][6] = 54, + [1][0][2][0][RTW89_CN][6] = 58, + [1][0][2][0][RTW89_QATAR][6] = 58, + [1][0][2][0][RTW89_FCC][7] = 50, + [1][0][2][0][RTW89_ETSI][7] = 58, + [1][0][2][0][RTW89_MKK][7] = 68, + [1][0][2][0][RTW89_IC][7] = 50, + [1][0][2][0][RTW89_KCC][7] = 68, + [1][0][2][0][RTW89_ACMA][7] = 58, + [1][0][2][0][RTW89_CHILE][7] = 50, + [1][0][2][0][RTW89_UKRAINE][7] = 58, + [1][0][2][0][RTW89_MEXICO][7] = 50, + [1][0][2][0][RTW89_CN][7] = 58, + [1][0][2][0][RTW89_QATAR][7] = 58, + [1][0][2][0][RTW89_FCC][8] = 50, + [1][0][2][0][RTW89_ETSI][8] = 58, + [1][0][2][0][RTW89_MKK][8] = 68, + [1][0][2][0][RTW89_IC][8] = 50, + [1][0][2][0][RTW89_KCC][8] = 68, + [1][0][2][0][RTW89_ACMA][8] = 58, + [1][0][2][0][RTW89_CHILE][8] = 50, + [1][0][2][0][RTW89_UKRAINE][8] = 58, + [1][0][2][0][RTW89_MEXICO][8] = 50, + [1][0][2][0][RTW89_CN][8] = 58, + [1][0][2][0][RTW89_QATAR][8] = 58, + [1][0][2][0][RTW89_FCC][9] = 42, + [1][0][2][0][RTW89_ETSI][9] = 58, + 
[1][0][2][0][RTW89_MKK][9] = 68, + [1][0][2][0][RTW89_IC][9] = 42, + [1][0][2][0][RTW89_KCC][9] = 68, + [1][0][2][0][RTW89_ACMA][9] = 58, + [1][0][2][0][RTW89_CHILE][9] = 42, + [1][0][2][0][RTW89_UKRAINE][9] = 58, + [1][0][2][0][RTW89_MEXICO][9] = 42, + [1][0][2][0][RTW89_CN][9] = 58, + [1][0][2][0][RTW89_QATAR][9] = 58, + [1][0][2][0][RTW89_FCC][10] = 40, + [1][0][2][0][RTW89_ETSI][10] = 58, + [1][0][2][0][RTW89_MKK][10] = 68, + [1][0][2][0][RTW89_IC][10] = 40, + [1][0][2][0][RTW89_KCC][10] = 68, + [1][0][2][0][RTW89_ACMA][10] = 58, + [1][0][2][0][RTW89_CHILE][10] = 40, + [1][0][2][0][RTW89_UKRAINE][10] = 58, + [1][0][2][0][RTW89_MEXICO][10] = 40, + [1][0][2][0][RTW89_CN][10] = 58, + [1][0][2][0][RTW89_QATAR][10] = 58, + [1][0][2][0][RTW89_FCC][11] = 127, + [1][0][2][0][RTW89_ETSI][11] = 127, + [1][0][2][0][RTW89_MKK][11] = 127, + [1][0][2][0][RTW89_IC][11] = 127, + [1][0][2][0][RTW89_KCC][11] = 127, + [1][0][2][0][RTW89_ACMA][11] = 127, + [1][0][2][0][RTW89_CHILE][11] = 127, + [1][0][2][0][RTW89_UKRAINE][11] = 127, + [1][0][2][0][RTW89_MEXICO][11] = 127, + [1][0][2][0][RTW89_CN][11] = 127, + [1][0][2][0][RTW89_QATAR][11] = 127, + [1][0][2][0][RTW89_FCC][12] = 127, + [1][0][2][0][RTW89_ETSI][12] = 127, + [1][0][2][0][RTW89_MKK][12] = 127, + [1][0][2][0][RTW89_IC][12] = 127, + [1][0][2][0][RTW89_KCC][12] = 127, + [1][0][2][0][RTW89_ACMA][12] = 127, + [1][0][2][0][RTW89_CHILE][12] = 127, + [1][0][2][0][RTW89_UKRAINE][12] = 127, + [1][0][2][0][RTW89_MEXICO][12] = 127, + [1][0][2][0][RTW89_CN][12] = 127, + [1][0][2][0][RTW89_QATAR][12] = 127, + [1][0][2][0][RTW89_FCC][13] = 127, + [1][0][2][0][RTW89_ETSI][13] = 127, + [1][0][2][0][RTW89_MKK][13] = 127, + [1][0][2][0][RTW89_IC][13] = 127, + [1][0][2][0][RTW89_KCC][13] = 127, + [1][0][2][0][RTW89_ACMA][13] = 127, + [1][0][2][0][RTW89_CHILE][13] = 127, + [1][0][2][0][RTW89_UKRAINE][13] = 127, + [1][0][2][0][RTW89_MEXICO][13] = 127, + [1][0][2][0][RTW89_CN][13] = 127, + [1][0][2][0][RTW89_QATAR][13] = 127, + [1][1][2][0][RTW89_FCC][0] = 127, + [1][1][2][0][RTW89_ETSI][0] = 127, + [1][1][2][0][RTW89_MKK][0] = 127, + [1][1][2][0][RTW89_IC][0] = 127, + [1][1][2][0][RTW89_KCC][0] = 127, + [1][1][2][0][RTW89_ACMA][0] = 127, + [1][1][2][0][RTW89_CHILE][0] = 127, + [1][1][2][0][RTW89_UKRAINE][0] = 127, + [1][1][2][0][RTW89_MEXICO][0] = 127, + [1][1][2][0][RTW89_CN][0] = 127, + [1][1][2][0][RTW89_QATAR][0] = 127, + [1][1][2][0][RTW89_FCC][1] = 127, + [1][1][2][0][RTW89_ETSI][1] = 127, + [1][1][2][0][RTW89_MKK][1] = 127, + [1][1][2][0][RTW89_IC][1] = 127, + [1][1][2][0][RTW89_KCC][1] = 127, + [1][1][2][0][RTW89_ACMA][1] = 127, + [1][1][2][0][RTW89_CHILE][1] = 127, + [1][1][2][0][RTW89_UKRAINE][1] = 127, + [1][1][2][0][RTW89_MEXICO][1] = 127, + [1][1][2][0][RTW89_CN][1] = 127, + [1][1][2][0][RTW89_QATAR][1] = 127, + [1][1][2][0][RTW89_FCC][2] = 52, + [1][1][2][0][RTW89_ETSI][2] = 46, + [1][1][2][0][RTW89_MKK][2] = 64, + [1][1][2][0][RTW89_IC][2] = 52, + [1][1][2][0][RTW89_KCC][2] = 66, + [1][1][2][0][RTW89_ACMA][2] = 46, + [1][1][2][0][RTW89_CHILE][2] = 48, + [1][1][2][0][RTW89_UKRAINE][2] = 46, + [1][1][2][0][RTW89_MEXICO][2] = 52, + [1][1][2][0][RTW89_CN][2] = 46, + [1][1][2][0][RTW89_QATAR][2] = 46, + [1][1][2][0][RTW89_FCC][3] = 52, + [1][1][2][0][RTW89_ETSI][3] = 46, + [1][1][2][0][RTW89_MKK][3] = 64, + [1][1][2][0][RTW89_IC][3] = 52, + [1][1][2][0][RTW89_KCC][3] = 68, + [1][1][2][0][RTW89_ACMA][3] = 46, + [1][1][2][0][RTW89_CHILE][3] = 48, + [1][1][2][0][RTW89_UKRAINE][3] = 46, + [1][1][2][0][RTW89_MEXICO][3] = 52, + [1][1][2][0][RTW89_CN][3] = 46, + 
[1][1][2][0][RTW89_QATAR][3] = 46, + [1][1][2][0][RTW89_FCC][4] = 56, + [1][1][2][0][RTW89_ETSI][4] = 46, + [1][1][2][0][RTW89_MKK][4] = 64, + [1][1][2][0][RTW89_IC][4] = 56, + [1][1][2][0][RTW89_KCC][4] = 68, + [1][1][2][0][RTW89_ACMA][4] = 46, + [1][1][2][0][RTW89_CHILE][4] = 48, + [1][1][2][0][RTW89_UKRAINE][4] = 46, + [1][1][2][0][RTW89_MEXICO][4] = 56, + [1][1][2][0][RTW89_CN][4] = 46, + [1][1][2][0][RTW89_QATAR][4] = 46, + [1][1][2][0][RTW89_FCC][5] = 60, + [1][1][2][0][RTW89_ETSI][5] = 46, + [1][1][2][0][RTW89_MKK][5] = 64, + [1][1][2][0][RTW89_IC][5] = 60, + [1][1][2][0][RTW89_KCC][5] = 68, + [1][1][2][0][RTW89_ACMA][5] = 46, + [1][1][2][0][RTW89_CHILE][5] = 48, + [1][1][2][0][RTW89_UKRAINE][5] = 46, + [1][1][2][0][RTW89_MEXICO][5] = 60, + [1][1][2][0][RTW89_CN][5] = 46, + [1][1][2][0][RTW89_QATAR][5] = 46, + [1][1][2][0][RTW89_FCC][6] = 54, + [1][1][2][0][RTW89_ETSI][6] = 46, + [1][1][2][0][RTW89_MKK][6] = 64, + [1][1][2][0][RTW89_IC][6] = 52, + [1][1][2][0][RTW89_KCC][6] = 68, + [1][1][2][0][RTW89_ACMA][6] = 46, + [1][1][2][0][RTW89_CHILE][6] = 48, + [1][1][2][0][RTW89_UKRAINE][6] = 46, + [1][1][2][0][RTW89_MEXICO][6] = 54, + [1][1][2][0][RTW89_CN][6] = 46, + [1][1][2][0][RTW89_QATAR][6] = 46, + [1][1][2][0][RTW89_FCC][7] = 50, + [1][1][2][0][RTW89_ETSI][7] = 46, + [1][1][2][0][RTW89_MKK][7] = 64, + [1][1][2][0][RTW89_IC][7] = 48, + [1][1][2][0][RTW89_KCC][7] = 68, + [1][1][2][0][RTW89_ACMA][7] = 46, + [1][1][2][0][RTW89_CHILE][7] = 48, + [1][1][2][0][RTW89_UKRAINE][7] = 46, + [1][1][2][0][RTW89_MEXICO][7] = 50, + [1][1][2][0][RTW89_CN][7] = 46, + [1][1][2][0][RTW89_QATAR][7] = 46, + [1][1][2][0][RTW89_FCC][8] = 50, + [1][1][2][0][RTW89_ETSI][8] = 46, + [1][1][2][0][RTW89_MKK][8] = 64, + [1][1][2][0][RTW89_IC][8] = 48, + [1][1][2][0][RTW89_KCC][8] = 68, + [1][1][2][0][RTW89_ACMA][8] = 46, + [1][1][2][0][RTW89_CHILE][8] = 48, + [1][1][2][0][RTW89_UKRAINE][8] = 46, + [1][1][2][0][RTW89_MEXICO][8] = 50, + [1][1][2][0][RTW89_CN][8] = 46, + [1][1][2][0][RTW89_QATAR][8] = 46, + [1][1][2][0][RTW89_FCC][9] = 38, + [1][1][2][0][RTW89_ETSI][9] = 46, + [1][1][2][0][RTW89_MKK][9] = 64, + [1][1][2][0][RTW89_IC][9] = 38, + [1][1][2][0][RTW89_KCC][9] = 68, + [1][1][2][0][RTW89_ACMA][9] = 46, + [1][1][2][0][RTW89_CHILE][9] = 38, + [1][1][2][0][RTW89_UKRAINE][9] = 46, + [1][1][2][0][RTW89_MEXICO][9] = 38, + [1][1][2][0][RTW89_CN][9] = 46, + [1][1][2][0][RTW89_QATAR][9] = 46, + [1][1][2][0][RTW89_FCC][10] = 36, + [1][1][2][0][RTW89_ETSI][10] = 46, + [1][1][2][0][RTW89_MKK][10] = 64, + [1][1][2][0][RTW89_IC][10] = 36, + [1][1][2][0][RTW89_KCC][10] = 66, + [1][1][2][0][RTW89_ACMA][10] = 46, + [1][1][2][0][RTW89_CHILE][10] = 36, + [1][1][2][0][RTW89_UKRAINE][10] = 46, + [1][1][2][0][RTW89_MEXICO][10] = 36, + [1][1][2][0][RTW89_CN][10] = 46, + [1][1][2][0][RTW89_QATAR][10] = 46, + [1][1][2][0][RTW89_FCC][11] = 127, + [1][1][2][0][RTW89_ETSI][11] = 127, + [1][1][2][0][RTW89_MKK][11] = 127, + [1][1][2][0][RTW89_IC][11] = 127, + [1][1][2][0][RTW89_KCC][11] = 127, + [1][1][2][0][RTW89_ACMA][11] = 127, + [1][1][2][0][RTW89_CHILE][11] = 127, + [1][1][2][0][RTW89_UKRAINE][11] = 127, + [1][1][2][0][RTW89_MEXICO][11] = 127, + [1][1][2][0][RTW89_CN][11] = 127, + [1][1][2][0][RTW89_QATAR][11] = 127, + [1][1][2][0][RTW89_FCC][12] = 127, + [1][1][2][0][RTW89_ETSI][12] = 127, + [1][1][2][0][RTW89_MKK][12] = 127, + [1][1][2][0][RTW89_IC][12] = 127, + [1][1][2][0][RTW89_KCC][12] = 127, + [1][1][2][0][RTW89_ACMA][12] = 127, + [1][1][2][0][RTW89_CHILE][12] = 127, + [1][1][2][0][RTW89_UKRAINE][12] = 127, + 
[1][1][2][0][RTW89_MEXICO][12] = 127, + [1][1][2][0][RTW89_CN][12] = 127, + [1][1][2][0][RTW89_QATAR][12] = 127, + [1][1][2][0][RTW89_FCC][13] = 127, + [1][1][2][0][RTW89_ETSI][13] = 127, + [1][1][2][0][RTW89_MKK][13] = 127, + [1][1][2][0][RTW89_IC][13] = 127, + [1][1][2][0][RTW89_KCC][13] = 127, + [1][1][2][0][RTW89_ACMA][13] = 127, + [1][1][2][0][RTW89_CHILE][13] = 127, + [1][1][2][0][RTW89_UKRAINE][13] = 127, + [1][1][2][0][RTW89_MEXICO][13] = 127, + [1][1][2][0][RTW89_CN][13] = 127, + [1][1][2][0][RTW89_QATAR][13] = 127, + [1][1][2][1][RTW89_FCC][0] = 127, + [1][1][2][1][RTW89_ETSI][0] = 127, + [1][1][2][1][RTW89_MKK][0] = 127, + [1][1][2][1][RTW89_IC][0] = 127, + [1][1][2][1][RTW89_KCC][0] = 127, + [1][1][2][1][RTW89_ACMA][0] = 127, + [1][1][2][1][RTW89_CHILE][0] = 127, + [1][1][2][1][RTW89_UKRAINE][0] = 127, + [1][1][2][1][RTW89_MEXICO][0] = 127, + [1][1][2][1][RTW89_CN][0] = 127, + [1][1][2][1][RTW89_QATAR][0] = 127, + [1][1][2][1][RTW89_FCC][1] = 127, + [1][1][2][1][RTW89_ETSI][1] = 127, + [1][1][2][1][RTW89_MKK][1] = 127, + [1][1][2][1][RTW89_IC][1] = 127, + [1][1][2][1][RTW89_KCC][1] = 127, + [1][1][2][1][RTW89_ACMA][1] = 127, + [1][1][2][1][RTW89_CHILE][1] = 127, + [1][1][2][1][RTW89_UKRAINE][1] = 127, + [1][1][2][1][RTW89_MEXICO][1] = 127, + [1][1][2][1][RTW89_CN][1] = 127, + [1][1][2][1][RTW89_QATAR][1] = 127, + [1][1][2][1][RTW89_FCC][2] = 52, + [1][1][2][1][RTW89_ETSI][2] = 34, + [1][1][2][1][RTW89_MKK][2] = 64, + [1][1][2][1][RTW89_IC][2] = 52, + [1][1][2][1][RTW89_KCC][2] = 66, + [1][1][2][1][RTW89_ACMA][2] = 34, + [1][1][2][1][RTW89_CHILE][2] = 36, + [1][1][2][1][RTW89_UKRAINE][2] = 34, + [1][1][2][1][RTW89_MEXICO][2] = 52, + [1][1][2][1][RTW89_CN][2] = 34, + [1][1][2][1][RTW89_QATAR][2] = 34, + [1][1][2][1][RTW89_FCC][3] = 52, + [1][1][2][1][RTW89_ETSI][3] = 34, + [1][1][2][1][RTW89_MKK][3] = 64, + [1][1][2][1][RTW89_IC][3] = 52, + [1][1][2][1][RTW89_KCC][3] = 68, + [1][1][2][1][RTW89_ACMA][3] = 34, + [1][1][2][1][RTW89_CHILE][3] = 36, + [1][1][2][1][RTW89_UKRAINE][3] = 34, + [1][1][2][1][RTW89_MEXICO][3] = 52, + [1][1][2][1][RTW89_CN][3] = 34, + [1][1][2][1][RTW89_QATAR][3] = 34, + [1][1][2][1][RTW89_FCC][4] = 56, + [1][1][2][1][RTW89_ETSI][4] = 34, + [1][1][2][1][RTW89_MKK][4] = 64, + [1][1][2][1][RTW89_IC][4] = 56, + [1][1][2][1][RTW89_KCC][4] = 68, + [1][1][2][1][RTW89_ACMA][4] = 34, + [1][1][2][1][RTW89_CHILE][4] = 36, + [1][1][2][1][RTW89_UKRAINE][4] = 34, + [1][1][2][1][RTW89_MEXICO][4] = 56, + [1][1][2][1][RTW89_CN][4] = 34, + [1][1][2][1][RTW89_QATAR][4] = 34, + [1][1][2][1][RTW89_FCC][5] = 60, + [1][1][2][1][RTW89_ETSI][5] = 34, + [1][1][2][1][RTW89_MKK][5] = 64, + [1][1][2][1][RTW89_IC][5] = 60, + [1][1][2][1][RTW89_KCC][5] = 68, + [1][1][2][1][RTW89_ACMA][5] = 34, + [1][1][2][1][RTW89_CHILE][5] = 36, + [1][1][2][1][RTW89_UKRAINE][5] = 34, + [1][1][2][1][RTW89_MEXICO][5] = 60, + [1][1][2][1][RTW89_CN][5] = 34, + [1][1][2][1][RTW89_QATAR][5] = 34, + [1][1][2][1][RTW89_FCC][6] = 54, + [1][1][2][1][RTW89_ETSI][6] = 34, + [1][1][2][1][RTW89_MKK][6] = 64, + [1][1][2][1][RTW89_IC][6] = 52, + [1][1][2][1][RTW89_KCC][6] = 68, + [1][1][2][1][RTW89_ACMA][6] = 34, + [1][1][2][1][RTW89_CHILE][6] = 36, + [1][1][2][1][RTW89_UKRAINE][6] = 34, + [1][1][2][1][RTW89_MEXICO][6] = 54, + [1][1][2][1][RTW89_CN][6] = 34, + [1][1][2][1][RTW89_QATAR][6] = 34, + [1][1][2][1][RTW89_FCC][7] = 50, + [1][1][2][1][RTW89_ETSI][7] = 34, + [1][1][2][1][RTW89_MKK][7] = 64, + [1][1][2][1][RTW89_IC][7] = 48, + [1][1][2][1][RTW89_KCC][7] = 68, + [1][1][2][1][RTW89_ACMA][7] = 34, + 
[1][1][2][1][RTW89_CHILE][7] = 36,
+ [1][1][2][1][RTW89_UKRAINE][7] = 34,
+ [1][1][2][1][RTW89_MEXICO][7] = 50,
+ [1][1][2][1][RTW89_CN][7] = 34,
+ [1][1][2][1][RTW89_QATAR][7] = 34,
+ [1][1][2][1][RTW89_FCC][8] = 50,
+ [1][1][2][1][RTW89_ETSI][8] = 34,
+ [1][1][2][1][RTW89_MKK][8] = 64,
+ [1][1][2][1][RTW89_IC][8] = 48,
+ [1][1][2][1][RTW89_KCC][8] = 68,
+ [1][1][2][1][RTW89_ACMA][8] = 34,
+ [1][1][2][1][RTW89_CHILE][8] = 36,
+ [1][1][2][1][RTW89_UKRAINE][8] = 34,
+ [1][1][2][1][RTW89_MEXICO][8] = 50,
+ [1][1][2][1][RTW89_CN][8] = 34,
+ [1][1][2][1][RTW89_QATAR][8] = 34,
+ [1][1][2][1][RTW89_FCC][9] = 38,
+ [1][1][2][1][RTW89_ETSI][9] = 34,
+ [1][1][2][1][RTW89_MKK][9] = 64,
+ [1][1][2][1][RTW89_IC][9] = 38,
+ [1][1][2][1][RTW89_KCC][9] = 68,
+ [1][1][2][1][RTW89_ACMA][9] = 34,
+ [1][1][2][1][RTW89_CHILE][9] = 36,
+ [1][1][2][1][RTW89_UKRAINE][9] = 34,
+ [1][1][2][1][RTW89_MEXICO][9] = 38,
+ [1][1][2][1][RTW89_CN][9] = 34,
+ [1][1][2][1][RTW89_QATAR][9] = 34,
+ [1][1][2][1][RTW89_FCC][10] = 36,
+ [1][1][2][1][RTW89_ETSI][10] = 34,
+ [1][1][2][1][RTW89_MKK][10] = 64,
+ [1][1][2][1][RTW89_IC][10] = 36,
+ [1][1][2][1][RTW89_KCC][10] = 66,
+ [1][1][2][1][RTW89_ACMA][10] = 34,
+ [1][1][2][1][RTW89_CHILE][10] = 36,
+ [1][1][2][1][RTW89_UKRAINE][10] = 34,
+ [1][1][2][1][RTW89_MEXICO][10] = 36,
+ [1][1][2][1][RTW89_CN][10] = 34,
+ [1][1][2][1][RTW89_QATAR][10] = 34,
+ [1][1][2][1][RTW89_FCC][11] = 127,
+ [1][1][2][1][RTW89_ETSI][11] = 127,
+ [1][1][2][1][RTW89_MKK][11] = 127,
+ [1][1][2][1][RTW89_IC][11] = 127,
+ [1][1][2][1][RTW89_KCC][11] = 127,
+ [1][1][2][1][RTW89_ACMA][11] = 127,
+ [1][1][2][1][RTW89_CHILE][11] = 127,
+ [1][1][2][1][RTW89_UKRAINE][11] = 127,
+ [1][1][2][1][RTW89_MEXICO][11] = 127,
+ [1][1][2][1][RTW89_CN][11] = 127,
+ [1][1][2][1][RTW89_QATAR][11] = 127,
+ [1][1][2][1][RTW89_FCC][12] = 127,
+ [1][1][2][1][RTW89_ETSI][12] = 127,
+ [1][1][2][1][RTW89_MKK][12] = 127,
+ [1][1][2][1][RTW89_IC][12] = 127,
+ [1][1][2][1][RTW89_KCC][12] = 127,
+ [1][1][2][1][RTW89_ACMA][12] = 127,
+ [1][1][2][1][RTW89_CHILE][12] = 127,
+ [1][1][2][1][RTW89_UKRAINE][12] = 127,
+ [1][1][2][1][RTW89_MEXICO][12] = 127,
+ [1][1][2][1][RTW89_CN][12] = 127,
+ [1][1][2][1][RTW89_QATAR][12] = 127,
+ [1][1][2][1][RTW89_FCC][13] = 127,
+ [1][1][2][1][RTW89_ETSI][13] = 127,
+ [1][1][2][1][RTW89_MKK][13] = 127,
+ [1][1][2][1][RTW89_IC][13] = 127,
+ [1][1][2][1][RTW89_KCC][13] = 127,
+ [1][1][2][1][RTW89_ACMA][13] = 127,
+ [1][1][2][1][RTW89_CHILE][13] = 127,
+ [1][1][2][1][RTW89_UKRAINE][13] = 127,
+ [1][1][2][1][RTW89_MEXICO][13] = 127,
+ [1][1][2][1][RTW89_CN][13] = 127,
+ [1][1][2][1][RTW89_QATAR][13] = 127,
};
const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
                                 [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
                                 [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
- [0][0][1][0][0][0] = 30,
- [0][0][1][0][0][2] = 30,
- [0][0][1][0][0][4] = 30,
- [0][0][1][0][0][6] = 30,
- [0][0][1][0][0][8] = 52,
- [0][0][1][0][0][10] = 52,
- [0][0][1][0][0][12] = 52,
- [0][0][1][0][0][14] = 52,
- [0][0][1][0][0][15] = 52,
- [0][0][1][0][0][17] = 52,
- [0][0][1][0][0][19] = 52,
- [0][0][1][0][0][21] = 52,
- [0][0][1][0][0][23] = 52,
- [0][0][1][0][0][25] = 52,
- [0][0][1][0][0][27] = 52,
- [0][0][1][0][0][29] = 52,
- [0][0][1][0][0][31] = 52,
- [0][0][1][0][0][33] = 52,
- [0][0][1][0][0][35] = 52,
- [0][0][1][0][0][37] = 54,
- [0][0][1][0][0][38] = 28,
- [0][0][1][0][0][40] = 28,
- [0][0][1][0][0][42] = 28,
- [0][0][1][0][0][44] = 28,
- [0][0][1][0][0][46] = 28,
- [0][1][1][0][0][0] = 18,
- [0][1][1][0][0][2] = 18,
- [0][1][1][0][0][4] = 18,
-
[0][1][1][0][0][6] = 18, - [0][1][1][0][0][8] = 40, - [0][1][1][0][0][10] = 40, - [0][1][1][0][0][12] = 40, - [0][1][1][0][0][14] = 40, - [0][1][1][0][0][15] = 40, - [0][1][1][0][0][17] = 40, - [0][1][1][0][0][19] = 40, - [0][1][1][0][0][21] = 40, - [0][1][1][0][0][23] = 40, - [0][1][1][0][0][25] = 40, - [0][1][1][0][0][27] = 40, - [0][1][1][0][0][29] = 40, - [0][1][1][0][0][31] = 40, - [0][1][1][0][0][33] = 40, - [0][1][1][0][0][35] = 40, - [0][1][1][0][0][37] = 42, - [0][1][1][0][0][38] = 16, - [0][1][1][0][0][40] = 16, - [0][1][1][0][0][42] = 16, - [0][1][1][0][0][44] = 16, - [0][1][1][0][0][46] = 16, - [0][0][2][0][0][0] = 30, - [0][0][2][0][0][2] = 30, - [0][0][2][0][0][4] = 30, - [0][0][2][0][0][6] = 30, - [0][0][2][0][0][8] = 52, - [0][0][2][0][0][10] = 52, - [0][0][2][0][0][12] = 52, - [0][0][2][0][0][14] = 52, - [0][0][2][0][0][15] = 52, - [0][0][2][0][0][17] = 52, - [0][0][2][0][0][19] = 52, - [0][0][2][0][0][21] = 52, - [0][0][2][0][0][23] = 52, - [0][0][2][0][0][25] = 52, - [0][0][2][0][0][27] = 52, - [0][0][2][0][0][29] = 52, - [0][0][2][0][0][31] = 52, - [0][0][2][0][0][33] = 52, - [0][0][2][0][0][35] = 52, - [0][0][2][0][0][37] = 54, - [0][0][2][0][0][38] = 28, - [0][0][2][0][0][40] = 28, - [0][0][2][0][0][42] = 28, - [0][0][2][0][0][44] = 28, - [0][0][2][0][0][46] = 28, - [0][1][2][0][0][0] = 18, - [0][1][2][0][0][2] = 18, - [0][1][2][0][0][4] = 18, - [0][1][2][0][0][6] = 18, - [0][1][2][0][0][8] = 40, - [0][1][2][0][0][10] = 40, - [0][1][2][0][0][12] = 40, - [0][1][2][0][0][14] = 40, - [0][1][2][0][0][15] = 40, - [0][1][2][0][0][17] = 40, - [0][1][2][0][0][19] = 40, - [0][1][2][0][0][21] = 40, - [0][1][2][0][0][23] = 40, - [0][1][2][0][0][25] = 40, - [0][1][2][0][0][27] = 40, - [0][1][2][0][0][29] = 40, - [0][1][2][0][0][31] = 40, - [0][1][2][0][0][33] = 40, - [0][1][2][0][0][35] = 40, - [0][1][2][0][0][37] = 42, - [0][1][2][0][0][38] = 16, - [0][1][2][0][0][40] = 16, - [0][1][2][0][0][42] = 16, - [0][1][2][0][0][44] = 16, - [0][1][2][0][0][46] = 16, - [0][1][2][1][0][0] = 6, - [0][1][2][1][0][2] = 6, - [0][1][2][1][0][4] = 6, - [0][1][2][1][0][6] = 6, - [0][1][2][1][0][8] = 28, - [0][1][2][1][0][10] = 28, - [0][1][2][1][0][12] = 28, - [0][1][2][1][0][14] = 28, - [0][1][2][1][0][15] = 28, - [0][1][2][1][0][17] = 28, - [0][1][2][1][0][19] = 28, - [0][1][2][1][0][21] = 28, - [0][1][2][1][0][23] = 28, - [0][1][2][1][0][25] = 28, - [0][1][2][1][0][27] = 28, - [0][1][2][1][0][29] = 28, - [0][1][2][1][0][31] = 28, - [0][1][2][1][0][33] = 28, - [0][1][2][1][0][35] = 28, - [0][1][2][1][0][37] = 30, - [0][1][2][1][0][38] = 4, - [0][1][2][1][0][40] = 4, - [0][1][2][1][0][42] = 4, - [0][1][2][1][0][44] = 4, - [0][1][2][1][0][46] = 4, - [1][0][2][0][0][1] = 30, - [1][0][2][0][0][5] = 30, - [1][0][2][0][0][9] = 52, - [1][0][2][0][0][13] = 52, - [1][0][2][0][0][16] = 52, - [1][0][2][0][0][20] = 52, - [1][0][2][0][0][24] = 52, - [1][0][2][0][0][28] = 52, - [1][0][2][0][0][32] = 52, - [1][0][2][0][0][36] = 54, - [1][0][2][0][0][39] = 28, - [1][0][2][0][0][43] = 28, - [1][1][2][0][0][1] = 18, - [1][1][2][0][0][5] = 18, - [1][1][2][0][0][9] = 40, - [1][1][2][0][0][13] = 40, - [1][1][2][0][0][16] = 40, - [1][1][2][0][0][20] = 40, - [1][1][2][0][0][24] = 40, - [1][1][2][0][0][28] = 40, - [1][1][2][0][0][32] = 40, - [1][1][2][0][0][36] = 42, - [1][1][2][0][0][39] = 16, - [1][1][2][0][0][43] = 16, - [1][1][2][1][0][1] = 6, - [1][1][2][1][0][5] = 6, - [1][1][2][1][0][9] = 28, - [1][1][2][1][0][13] = 28, - [1][1][2][1][0][16] = 28, - [1][1][2][1][0][20] = 28, - [1][1][2][1][0][24] = 28, - 
[1][1][2][1][0][28] = 28, - [1][1][2][1][0][32] = 28, - [1][1][2][1][0][36] = 30, - [1][1][2][1][0][39] = 4, - [1][1][2][1][0][43] = 4, - [2][0][2][0][0][3] = 30, - [2][0][2][0][0][11] = 52, - [2][0][2][0][0][18] = 52, - [2][0][2][0][0][26] = 52, - [2][0][2][0][0][34] = 54, - [2][0][2][0][0][41] = 28, - [2][1][2][0][0][3] = 18, - [2][1][2][0][0][11] = 40, - [2][1][2][0][0][18] = 40, - [2][1][2][0][0][26] = 40, - [2][1][2][0][0][34] = 42, - [2][1][2][0][0][41] = 16, - [2][1][2][1][0][3] = 6, - [2][1][2][1][0][11] = 28, - [2][1][2][1][0][18] = 28, - [2][1][2][1][0][26] = 28, - [2][1][2][1][0][34] = 30, - [2][1][2][1][0][41] = 4, - [0][0][1][0][2][0] = 76, - [0][0][1][0][1][0] = 58, - [0][0][1][0][3][0] = 62, - [0][0][1][0][5][0] = 62, - [0][0][1][0][6][0] = 58, - [0][0][1][0][9][0] = 58, - [0][0][1][0][8][0] = 30, - [0][0][1][0][11][0] = 52, - [0][0][1][0][2][2] = 76, - [0][0][1][0][1][2] = 58, - [0][0][1][0][3][2] = 62, - [0][0][1][0][5][2] = 62, - [0][0][1][0][6][2] = 58, - [0][0][1][0][9][2] = 58, - [0][0][1][0][8][2] = 30, - [0][0][1][0][11][2] = 52, - [0][0][1][0][2][4] = 76, - [0][0][1][0][1][4] = 58, - [0][0][1][0][3][4] = 62, - [0][0][1][0][5][4] = 62, - [0][0][1][0][6][4] = 58, - [0][0][1][0][9][4] = 58, - [0][0][1][0][8][4] = 30, - [0][0][1][0][11][4] = 52, - [0][0][1][0][2][6] = 76, - [0][0][1][0][1][6] = 58, - [0][0][1][0][3][6] = 62, - [0][0][1][0][5][6] = 62, - [0][0][1][0][6][6] = 54, - [0][0][1][0][9][6] = 58, - [0][0][1][0][8][6] = 30, - [0][0][1][0][11][6] = 52, - [0][0][1][0][2][8] = 76, - [0][0][1][0][1][8] = 58, - [0][0][1][0][3][8] = 62, - [0][0][1][0][5][8] = 64, - [0][0][1][0][6][8] = 58, - [0][0][1][0][9][8] = 58, - [0][0][1][0][8][8] = 54, - [0][0][1][0][11][8] = 52, - [0][0][1][0][2][10] = 76, - [0][0][1][0][1][10] = 58, - [0][0][1][0][3][10] = 62, - [0][0][1][0][5][10] = 64, - [0][0][1][0][6][10] = 58, - [0][0][1][0][9][10] = 58, - [0][0][1][0][8][10] = 54, - [0][0][1][0][11][10] = 52, - [0][0][1][0][2][12] = 76, - [0][0][1][0][1][12] = 58, - [0][0][1][0][3][12] = 62, - [0][0][1][0][5][12] = 64, - [0][0][1][0][6][12] = 58, - [0][0][1][0][9][12] = 58, - [0][0][1][0][8][12] = 54, - [0][0][1][0][11][12] = 52, - [0][0][1][0][2][14] = 76, - [0][0][1][0][1][14] = 58, - [0][0][1][0][3][14] = 62, - [0][0][1][0][5][14] = 64, - [0][0][1][0][6][14] = 58, - [0][0][1][0][9][14] = 58, - [0][0][1][0][8][14] = 54, - [0][0][1][0][11][14] = 52, - [0][0][1][0][2][15] = 76, - [0][0][1][0][1][15] = 58, - [0][0][1][0][3][15] = 76, - [0][0][1][0][5][15] = 76, - [0][0][1][0][6][15] = 58, - [0][0][1][0][9][15] = 58, - [0][0][1][0][8][15] = 54, - [0][0][1][0][11][15] = 52, - [0][0][1][0][2][17] = 76, - [0][0][1][0][1][17] = 58, - [0][0][1][0][3][17] = 76, - [0][0][1][0][5][17] = 76, - [0][0][1][0][6][17] = 58, - [0][0][1][0][9][17] = 58, - [0][0][1][0][8][17] = 54, - [0][0][1][0][11][17] = 52, - [0][0][1][0][2][19] = 76, - [0][0][1][0][1][19] = 58, - [0][0][1][0][3][19] = 76, - [0][0][1][0][5][19] = 76, - [0][0][1][0][6][19] = 58, - [0][0][1][0][9][19] = 58, - [0][0][1][0][8][19] = 54, - [0][0][1][0][11][19] = 52, - [0][0][1][0][2][21] = 76, - [0][0][1][0][1][21] = 58, - [0][0][1][0][3][21] = 76, - [0][0][1][0][5][21] = 76, - [0][0][1][0][6][21] = 58, - [0][0][1][0][9][21] = 58, - [0][0][1][0][8][21] = 54, - [0][0][1][0][11][21] = 52, - [0][0][1][0][2][23] = 76, - [0][0][1][0][1][23] = 58, - [0][0][1][0][3][23] = 76, - [0][0][1][0][5][23] = 76, - [0][0][1][0][6][23] = 58, - [0][0][1][0][9][23] = 58, - [0][0][1][0][8][23] = 54, - [0][0][1][0][11][23] = 52, - [0][0][1][0][2][25] = 76, - 
[0][0][1][0][1][25] = 58, - [0][0][1][0][3][25] = 76, - [0][0][1][0][5][25] = 127, - [0][0][1][0][6][25] = 58, - [0][0][1][0][9][25] = 127, - [0][0][1][0][8][25] = 54, - [0][0][1][0][11][25] = 52, - [0][0][1][0][2][27] = 76, - [0][0][1][0][1][27] = 58, - [0][0][1][0][3][27] = 76, - [0][0][1][0][5][27] = 127, - [0][0][1][0][6][27] = 58, - [0][0][1][0][9][27] = 127, - [0][0][1][0][8][27] = 54, - [0][0][1][0][11][27] = 52, - [0][0][1][0][2][29] = 76, - [0][0][1][0][1][29] = 58, - [0][0][1][0][3][29] = 76, - [0][0][1][0][5][29] = 127, - [0][0][1][0][6][29] = 58, - [0][0][1][0][9][29] = 127, - [0][0][1][0][8][29] = 54, - [0][0][1][0][11][29] = 52, - [0][0][1][0][2][31] = 76, - [0][0][1][0][1][31] = 58, - [0][0][1][0][3][31] = 76, - [0][0][1][0][5][31] = 76, - [0][0][1][0][6][31] = 58, - [0][0][1][0][9][31] = 58, - [0][0][1][0][8][31] = 54, - [0][0][1][0][11][31] = 52, - [0][0][1][0][2][33] = 76, - [0][0][1][0][1][33] = 58, - [0][0][1][0][3][33] = 76, - [0][0][1][0][5][33] = 76, - [0][0][1][0][6][33] = 58, - [0][0][1][0][9][33] = 58, - [0][0][1][0][8][33] = 54, - [0][0][1][0][11][33] = 52, - [0][0][1][0][2][35] = 74, - [0][0][1][0][1][35] = 58, - [0][0][1][0][3][35] = 76, - [0][0][1][0][5][35] = 74, - [0][0][1][0][6][35] = 58, - [0][0][1][0][9][35] = 58, - [0][0][1][0][8][35] = 54, - [0][0][1][0][11][35] = 52, - [0][0][1][0][2][37] = 76, - [0][0][1][0][1][37] = 127, - [0][0][1][0][3][37] = 76, - [0][0][1][0][5][37] = 76, - [0][0][1][0][6][37] = 58, - [0][0][1][0][9][37] = 76, - [0][0][1][0][8][37] = 54, - [0][0][1][0][11][37] = 127, - [0][0][1][0][2][38] = 76, - [0][0][1][0][1][38] = 28, - [0][0][1][0][3][38] = 127, - [0][0][1][0][5][38] = 76, - [0][0][1][0][6][38] = 28, - [0][0][1][0][9][38] = 76, - [0][0][1][0][8][38] = 54, - [0][0][1][0][11][38] = 52, - [0][0][1][0][2][40] = 76, - [0][0][1][0][1][40] = 28, - [0][0][1][0][3][40] = 127, - [0][0][1][0][5][40] = 76, - [0][0][1][0][6][40] = 28, - [0][0][1][0][9][40] = 76, - [0][0][1][0][8][40] = 54, - [0][0][1][0][11][40] = 52, - [0][0][1][0][2][42] = 76, - [0][0][1][0][1][42] = 28, - [0][0][1][0][3][42] = 127, - [0][0][1][0][5][42] = 76, - [0][0][1][0][6][42] = 28, - [0][0][1][0][9][42] = 76, - [0][0][1][0][8][42] = 54, - [0][0][1][0][11][42] = 52, - [0][0][1][0][2][44] = 76, - [0][0][1][0][1][44] = 28, - [0][0][1][0][3][44] = 127, - [0][0][1][0][5][44] = 76, - [0][0][1][0][6][44] = 28, - [0][0][1][0][9][44] = 76, - [0][0][1][0][8][44] = 54, - [0][0][1][0][11][44] = 52, - [0][0][1][0][2][46] = 76, - [0][0][1][0][1][46] = 28, - [0][0][1][0][3][46] = 127, - [0][0][1][0][5][46] = 76, - [0][0][1][0][6][46] = 28, - [0][0][1][0][9][46] = 76, - [0][0][1][0][8][46] = 54, - [0][0][1][0][11][46] = 52, - [0][1][1][0][2][0] = 68, - [0][1][1][0][1][0] = 46, - [0][1][1][0][3][0] = 50, - [0][1][1][0][5][0] = 40, - [0][1][1][0][6][0] = 46, - [0][1][1][0][9][0] = 46, - [0][1][1][0][8][0] = 18, - [0][1][1][0][11][0] = 40, - [0][1][1][0][2][2] = 68, - [0][1][1][0][1][2] = 46, - [0][1][1][0][3][2] = 50, - [0][1][1][0][5][2] = 40, - [0][1][1][0][6][2] = 46, - [0][1][1][0][9][2] = 46, - [0][1][1][0][8][2] = 18, - [0][1][1][0][11][2] = 40, - [0][1][1][0][2][4] = 68, - [0][1][1][0][1][4] = 46, - [0][1][1][0][3][4] = 50, - [0][1][1][0][5][4] = 40, - [0][1][1][0][6][4] = 46, - [0][1][1][0][9][4] = 46, - [0][1][1][0][8][4] = 18, - [0][1][1][0][11][4] = 40, - [0][1][1][0][2][6] = 68, - [0][1][1][0][1][6] = 46, - [0][1][1][0][3][6] = 50, - [0][1][1][0][5][6] = 40, - [0][1][1][0][6][6] = 36, - [0][1][1][0][9][6] = 46, - [0][1][1][0][8][6] = 18, - [0][1][1][0][11][6] = 40, - 
[0][1][1][0][2][8] = 68, - [0][1][1][0][1][8] = 46, - [0][1][1][0][3][8] = 50, - [0][1][1][0][5][8] = 52, - [0][1][1][0][6][8] = 46, - [0][1][1][0][9][8] = 46, - [0][1][1][0][8][8] = 42, - [0][1][1][0][11][8] = 40, - [0][1][1][0][2][10] = 68, - [0][1][1][0][1][10] = 46, - [0][1][1][0][3][10] = 50, - [0][1][1][0][5][10] = 52, - [0][1][1][0][6][10] = 46, - [0][1][1][0][9][10] = 46, - [0][1][1][0][8][10] = 42, - [0][1][1][0][11][10] = 40, - [0][1][1][0][2][12] = 68, - [0][1][1][0][1][12] = 46, - [0][1][1][0][3][12] = 50, - [0][1][1][0][5][12] = 52, - [0][1][1][0][6][12] = 46, - [0][1][1][0][9][12] = 46, - [0][1][1][0][8][12] = 42, - [0][1][1][0][11][12] = 40, - [0][1][1][0][2][14] = 68, - [0][1][1][0][1][14] = 46, - [0][1][1][0][3][14] = 50, - [0][1][1][0][5][14] = 52, - [0][1][1][0][6][14] = 46, - [0][1][1][0][9][14] = 46, - [0][1][1][0][8][14] = 42, - [0][1][1][0][11][14] = 40, - [0][1][1][0][2][15] = 68, - [0][1][1][0][1][15] = 46, - [0][1][1][0][3][15] = 70, - [0][1][1][0][5][15] = 68, - [0][1][1][0][6][15] = 46, - [0][1][1][0][9][15] = 46, - [0][1][1][0][8][15] = 42, - [0][1][1][0][11][15] = 40, - [0][1][1][0][2][17] = 68, - [0][1][1][0][1][17] = 46, - [0][1][1][0][3][17] = 70, - [0][1][1][0][5][17] = 68, - [0][1][1][0][6][17] = 46, - [0][1][1][0][9][17] = 46, - [0][1][1][0][8][17] = 42, - [0][1][1][0][11][17] = 40, - [0][1][1][0][2][19] = 68, - [0][1][1][0][1][19] = 46, - [0][1][1][0][3][19] = 70, - [0][1][1][0][5][19] = 68, - [0][1][1][0][6][19] = 46, - [0][1][1][0][9][19] = 46, - [0][1][1][0][8][19] = 42, - [0][1][1][0][11][19] = 40, - [0][1][1][0][2][21] = 68, - [0][1][1][0][1][21] = 46, - [0][1][1][0][3][21] = 70, - [0][1][1][0][5][21] = 68, - [0][1][1][0][6][21] = 46, - [0][1][1][0][9][21] = 46, - [0][1][1][0][8][21] = 42, - [0][1][1][0][11][21] = 40, - [0][1][1][0][2][23] = 68, - [0][1][1][0][1][23] = 46, - [0][1][1][0][3][23] = 70, - [0][1][1][0][5][23] = 68, - [0][1][1][0][6][23] = 46, - [0][1][1][0][9][23] = 46, - [0][1][1][0][8][23] = 42, - [0][1][1][0][11][23] = 40, - [0][1][1][0][2][25] = 68, - [0][1][1][0][1][25] = 46, - [0][1][1][0][3][25] = 70, - [0][1][1][0][5][25] = 127, - [0][1][1][0][6][25] = 46, - [0][1][1][0][9][25] = 127, - [0][1][1][0][8][25] = 42, - [0][1][1][0][11][25] = 40, - [0][1][1][0][2][27] = 68, - [0][1][1][0][1][27] = 46, - [0][1][1][0][3][27] = 70, - [0][1][1][0][5][27] = 127, - [0][1][1][0][6][27] = 46, - [0][1][1][0][9][27] = 127, - [0][1][1][0][8][27] = 42, - [0][1][1][0][11][27] = 40, - [0][1][1][0][2][29] = 68, - [0][1][1][0][1][29] = 46, - [0][1][1][0][3][29] = 70, - [0][1][1][0][5][29] = 127, - [0][1][1][0][6][29] = 46, - [0][1][1][0][9][29] = 127, - [0][1][1][0][8][29] = 42, - [0][1][1][0][11][29] = 40, - [0][1][1][0][2][31] = 68, - [0][1][1][0][1][31] = 46, - [0][1][1][0][3][31] = 70, - [0][1][1][0][5][31] = 68, - [0][1][1][0][6][31] = 46, - [0][1][1][0][9][31] = 46, - [0][1][1][0][8][31] = 42, - [0][1][1][0][11][31] = 40, - [0][1][1][0][2][33] = 68, - [0][1][1][0][1][33] = 46, - [0][1][1][0][3][33] = 70, - [0][1][1][0][5][33] = 68, - [0][1][1][0][6][33] = 46, - [0][1][1][0][9][33] = 46, - [0][1][1][0][8][33] = 42, - [0][1][1][0][11][33] = 40, - [0][1][1][0][2][35] = 66, - [0][1][1][0][1][35] = 46, - [0][1][1][0][3][35] = 70, - [0][1][1][0][5][35] = 66, - [0][1][1][0][6][35] = 46, - [0][1][1][0][9][35] = 46, - [0][1][1][0][8][35] = 42, - [0][1][1][0][11][35] = 40, - [0][1][1][0][2][37] = 68, - [0][1][1][0][1][37] = 127, - [0][1][1][0][3][37] = 70, - [0][1][1][0][5][37] = 68, - [0][1][1][0][6][37] = 46, - [0][1][1][0][9][37] = 68, - 
[0][1][1][0][8][37] = 42, - [0][1][1][0][11][37] = 127, - [0][1][1][0][2][38] = 76, - [0][1][1][0][1][38] = 16, - [0][1][1][0][3][38] = 127, - [0][1][1][0][5][38] = 76, - [0][1][1][0][6][38] = 16, - [0][1][1][0][9][38] = 76, - [0][1][1][0][8][38] = 42, - [0][1][1][0][11][38] = 40, - [0][1][1][0][2][40] = 76, - [0][1][1][0][1][40] = 16, - [0][1][1][0][3][40] = 127, - [0][1][1][0][5][40] = 76, - [0][1][1][0][6][40] = 16, - [0][1][1][0][9][40] = 76, - [0][1][1][0][8][40] = 42, - [0][1][1][0][11][40] = 40, - [0][1][1][0][2][42] = 76, - [0][1][1][0][1][42] = 16, - [0][1][1][0][3][42] = 127, - [0][1][1][0][5][42] = 76, - [0][1][1][0][6][42] = 16, - [0][1][1][0][9][42] = 76, - [0][1][1][0][8][42] = 42, - [0][1][1][0][11][42] = 40, - [0][1][1][0][2][44] = 76, - [0][1][1][0][1][44] = 16, - [0][1][1][0][3][44] = 127, - [0][1][1][0][5][44] = 76, - [0][1][1][0][6][44] = 16, - [0][1][1][0][9][44] = 76, - [0][1][1][0][8][44] = 42, - [0][1][1][0][11][44] = 40, - [0][1][1][0][2][46] = 76, - [0][1][1][0][1][46] = 16, - [0][1][1][0][3][46] = 127, - [0][1][1][0][5][46] = 76, - [0][1][1][0][6][46] = 16, - [0][1][1][0][9][46] = 76, - [0][1][1][0][8][46] = 42, - [0][1][1][0][11][46] = 40, - [0][0][2][0][2][0] = 76, - [0][0][2][0][1][0] = 58, - [0][0][2][0][3][0] = 62, - [0][0][2][0][5][0] = 62, - [0][0][2][0][6][0] = 58, - [0][0][2][0][9][0] = 58, - [0][0][2][0][8][0] = 30, - [0][0][2][0][11][0] = 52, - [0][0][2][0][2][2] = 76, - [0][0][2][0][1][2] = 58, - [0][0][2][0][3][2] = 62, - [0][0][2][0][5][2] = 62, - [0][0][2][0][6][2] = 58, - [0][0][2][0][9][2] = 58, - [0][0][2][0][8][2] = 30, - [0][0][2][0][11][2] = 52, - [0][0][2][0][2][4] = 76, - [0][0][2][0][1][4] = 58, - [0][0][2][0][3][4] = 62, - [0][0][2][0][5][4] = 62, - [0][0][2][0][6][4] = 58, - [0][0][2][0][9][4] = 58, - [0][0][2][0][8][4] = 30, - [0][0][2][0][11][4] = 52, - [0][0][2][0][2][6] = 76, - [0][0][2][0][1][6] = 58, - [0][0][2][0][3][6] = 62, - [0][0][2][0][5][6] = 62, - [0][0][2][0][6][6] = 54, - [0][0][2][0][9][6] = 58, - [0][0][2][0][8][6] = 30, - [0][0][2][0][11][6] = 52, - [0][0][2][0][2][8] = 76, - [0][0][2][0][1][8] = 58, - [0][0][2][0][3][8] = 62, - [0][0][2][0][5][8] = 64, - [0][0][2][0][6][8] = 58, - [0][0][2][0][9][8] = 58, - [0][0][2][0][8][8] = 54, - [0][0][2][0][11][8] = 52, - [0][0][2][0][2][10] = 76, - [0][0][2][0][1][10] = 58, - [0][0][2][0][3][10] = 62, - [0][0][2][0][5][10] = 64, - [0][0][2][0][6][10] = 58, - [0][0][2][0][9][10] = 58, - [0][0][2][0][8][10] = 54, - [0][0][2][0][11][10] = 52, - [0][0][2][0][2][12] = 76, - [0][0][2][0][1][12] = 58, - [0][0][2][0][3][12] = 62, - [0][0][2][0][5][12] = 64, - [0][0][2][0][6][12] = 58, - [0][0][2][0][9][12] = 58, - [0][0][2][0][8][12] = 54, - [0][0][2][0][11][12] = 52, - [0][0][2][0][2][14] = 76, - [0][0][2][0][1][14] = 58, - [0][0][2][0][3][14] = 62, - [0][0][2][0][5][14] = 64, - [0][0][2][0][6][14] = 58, - [0][0][2][0][9][14] = 58, - [0][0][2][0][8][14] = 54, - [0][0][2][0][11][14] = 52, - [0][0][2][0][2][15] = 74, - [0][0][2][0][1][15] = 58, - [0][0][2][0][3][15] = 76, - [0][0][2][0][5][15] = 74, - [0][0][2][0][6][15] = 58, - [0][0][2][0][9][15] = 58, - [0][0][2][0][8][15] = 54, - [0][0][2][0][11][15] = 52, - [0][0][2][0][2][17] = 76, - [0][0][2][0][1][17] = 58, - [0][0][2][0][3][17] = 76, - [0][0][2][0][5][17] = 76, - [0][0][2][0][6][17] = 58, - [0][0][2][0][9][17] = 58, - [0][0][2][0][8][17] = 54, - [0][0][2][0][11][17] = 52, - [0][0][2][0][2][19] = 76, - [0][0][2][0][1][19] = 58, - [0][0][2][0][3][19] = 76, - [0][0][2][0][5][19] = 76, - [0][0][2][0][6][19] = 58, - 
[0][0][2][0][9][19] = 58, - [0][0][2][0][8][19] = 54, - [0][0][2][0][11][19] = 52, - [0][0][2][0][2][21] = 76, - [0][0][2][0][1][21] = 58, - [0][0][2][0][3][21] = 76, - [0][0][2][0][5][21] = 76, - [0][0][2][0][6][21] = 58, - [0][0][2][0][9][21] = 58, - [0][0][2][0][8][21] = 54, - [0][0][2][0][11][21] = 52, - [0][0][2][0][2][23] = 76, - [0][0][2][0][1][23] = 58, - [0][0][2][0][3][23] = 76, - [0][0][2][0][5][23] = 76, - [0][0][2][0][6][23] = 58, - [0][0][2][0][9][23] = 58, - [0][0][2][0][8][23] = 54, - [0][0][2][0][11][23] = 52, - [0][0][2][0][2][25] = 76, - [0][0][2][0][1][25] = 58, - [0][0][2][0][3][25] = 76, - [0][0][2][0][5][25] = 127, - [0][0][2][0][6][25] = 58, - [0][0][2][0][9][25] = 127, - [0][0][2][0][8][25] = 54, - [0][0][2][0][11][25] = 52, - [0][0][2][0][2][27] = 76, - [0][0][2][0][1][27] = 58, - [0][0][2][0][3][27] = 76, - [0][0][2][0][5][27] = 127, - [0][0][2][0][6][27] = 58, - [0][0][2][0][9][27] = 127, - [0][0][2][0][8][27] = 54, - [0][0][2][0][11][27] = 52, - [0][0][2][0][2][29] = 76, - [0][0][2][0][1][29] = 58, - [0][0][2][0][3][29] = 76, - [0][0][2][0][5][29] = 127, - [0][0][2][0][6][29] = 58, - [0][0][2][0][9][29] = 127, - [0][0][2][0][8][29] = 54, - [0][0][2][0][11][29] = 52, - [0][0][2][0][2][31] = 76, - [0][0][2][0][1][31] = 58, - [0][0][2][0][3][31] = 76, - [0][0][2][0][5][31] = 76, - [0][0][2][0][6][31] = 58, - [0][0][2][0][9][31] = 58, - [0][0][2][0][8][31] = 54, - [0][0][2][0][11][31] = 52, - [0][0][2][0][2][33] = 76, - [0][0][2][0][1][33] = 58, - [0][0][2][0][3][33] = 76, - [0][0][2][0][5][33] = 76, - [0][0][2][0][6][33] = 58, - [0][0][2][0][9][33] = 58, - [0][0][2][0][8][33] = 54, - [0][0][2][0][11][33] = 52, - [0][0][2][0][2][35] = 70, - [0][0][2][0][1][35] = 58, - [0][0][2][0][3][35] = 76, - [0][0][2][0][5][35] = 70, - [0][0][2][0][6][35] = 58, - [0][0][2][0][9][35] = 58, - [0][0][2][0][8][35] = 54, - [0][0][2][0][11][35] = 52, - [0][0][2][0][2][37] = 76, - [0][0][2][0][1][37] = 127, - [0][0][2][0][3][37] = 76, - [0][0][2][0][5][37] = 76, - [0][0][2][0][6][37] = 58, - [0][0][2][0][9][37] = 76, - [0][0][2][0][8][37] = 54, - [0][0][2][0][11][37] = 127, - [0][0][2][0][2][38] = 76, - [0][0][2][0][1][38] = 28, - [0][0][2][0][3][38] = 127, - [0][0][2][0][5][38] = 76, - [0][0][2][0][6][38] = 28, - [0][0][2][0][9][38] = 76, - [0][0][2][0][8][38] = 54, - [0][0][2][0][11][38] = 52, - [0][0][2][0][2][40] = 76, - [0][0][2][0][1][40] = 28, - [0][0][2][0][3][40] = 127, - [0][0][2][0][5][40] = 76, - [0][0][2][0][6][40] = 28, - [0][0][2][0][9][40] = 76, - [0][0][2][0][8][40] = 54, - [0][0][2][0][11][40] = 52, - [0][0][2][0][2][42] = 76, - [0][0][2][0][1][42] = 28, - [0][0][2][0][3][42] = 127, - [0][0][2][0][5][42] = 76, - [0][0][2][0][6][42] = 28, - [0][0][2][0][9][42] = 76, - [0][0][2][0][8][42] = 54, - [0][0][2][0][11][42] = 52, - [0][0][2][0][2][44] = 76, - [0][0][2][0][1][44] = 28, - [0][0][2][0][3][44] = 127, - [0][0][2][0][5][44] = 76, - [0][0][2][0][6][44] = 28, - [0][0][2][0][9][44] = 76, - [0][0][2][0][8][44] = 54, - [0][0][2][0][11][44] = 52, - [0][0][2][0][2][46] = 76, - [0][0][2][0][1][46] = 28, - [0][0][2][0][3][46] = 127, - [0][0][2][0][5][46] = 76, - [0][0][2][0][6][46] = 28, - [0][0][2][0][9][46] = 76, - [0][0][2][0][8][46] = 54, - [0][0][2][0][11][46] = 52, - [0][1][2][0][2][0] = 68, - [0][1][2][0][1][0] = 46, - [0][1][2][0][3][0] = 50, - [0][1][2][0][5][0] = 40, - [0][1][2][0][6][0] = 46, - [0][1][2][0][9][0] = 46, - [0][1][2][0][8][0] = 18, - [0][1][2][0][11][0] = 40, - [0][1][2][0][2][2] = 68, - [0][1][2][0][1][2] = 46, - [0][1][2][0][3][2] = 50, - 
[0][1][2][0][5][2] = 40, - [0][1][2][0][6][2] = 46, - [0][1][2][0][9][2] = 46, - [0][1][2][0][8][2] = 18, - [0][1][2][0][11][2] = 40, - [0][1][2][0][2][4] = 68, - [0][1][2][0][1][4] = 46, - [0][1][2][0][3][4] = 50, - [0][1][2][0][5][4] = 40, - [0][1][2][0][6][4] = 46, - [0][1][2][0][9][4] = 46, - [0][1][2][0][8][4] = 18, - [0][1][2][0][11][4] = 40, - [0][1][2][0][2][6] = 68, - [0][1][2][0][1][6] = 46, - [0][1][2][0][3][6] = 50, - [0][1][2][0][5][6] = 40, - [0][1][2][0][6][6] = 36, - [0][1][2][0][9][6] = 46, - [0][1][2][0][8][6] = 18, - [0][1][2][0][11][6] = 40, - [0][1][2][0][2][8] = 68, - [0][1][2][0][1][8] = 46, - [0][1][2][0][3][8] = 50, - [0][1][2][0][5][8] = 52, - [0][1][2][0][6][8] = 46, - [0][1][2][0][9][8] = 46, - [0][1][2][0][8][8] = 42, - [0][1][2][0][11][8] = 40, - [0][1][2][0][2][10] = 68, - [0][1][2][0][1][10] = 46, - [0][1][2][0][3][10] = 50, - [0][1][2][0][5][10] = 52, - [0][1][2][0][6][10] = 46, - [0][1][2][0][9][10] = 46, - [0][1][2][0][8][10] = 42, - [0][1][2][0][11][10] = 40, - [0][1][2][0][2][12] = 68, - [0][1][2][0][1][12] = 46, - [0][1][2][0][3][12] = 50, - [0][1][2][0][5][12] = 52, - [0][1][2][0][6][12] = 46, - [0][1][2][0][9][12] = 46, - [0][1][2][0][8][12] = 42, - [0][1][2][0][11][12] = 40, - [0][1][2][0][2][14] = 68, - [0][1][2][0][1][14] = 46, - [0][1][2][0][3][14] = 50, - [0][1][2][0][5][14] = 52, - [0][1][2][0][6][14] = 46, - [0][1][2][0][9][14] = 46, - [0][1][2][0][8][14] = 42, - [0][1][2][0][11][14] = 40, - [0][1][2][0][2][15] = 68, - [0][1][2][0][1][15] = 46, - [0][1][2][0][3][15] = 70, - [0][1][2][0][5][15] = 68, - [0][1][2][0][6][15] = 46, - [0][1][2][0][9][15] = 46, - [0][1][2][0][8][15] = 42, - [0][1][2][0][11][15] = 40, - [0][1][2][0][2][17] = 68, - [0][1][2][0][1][17] = 46, - [0][1][2][0][3][17] = 70, - [0][1][2][0][5][17] = 68, - [0][1][2][0][6][17] = 46, - [0][1][2][0][9][17] = 46, - [0][1][2][0][8][17] = 42, - [0][1][2][0][11][17] = 40, - [0][1][2][0][2][19] = 68, - [0][1][2][0][1][19] = 46, - [0][1][2][0][3][19] = 70, - [0][1][2][0][5][19] = 68, - [0][1][2][0][6][19] = 46, - [0][1][2][0][9][19] = 46, - [0][1][2][0][8][19] = 42, - [0][1][2][0][11][19] = 40, - [0][1][2][0][2][21] = 68, - [0][1][2][0][1][21] = 46, - [0][1][2][0][3][21] = 70, - [0][1][2][0][5][21] = 68, - [0][1][2][0][6][21] = 46, - [0][1][2][0][9][21] = 46, - [0][1][2][0][8][21] = 42, - [0][1][2][0][11][21] = 40, - [0][1][2][0][2][23] = 68, - [0][1][2][0][1][23] = 46, - [0][1][2][0][3][23] = 70, - [0][1][2][0][5][23] = 68, - [0][1][2][0][6][23] = 46, - [0][1][2][0][9][23] = 46, - [0][1][2][0][8][23] = 42, - [0][1][2][0][11][23] = 40, - [0][1][2][0][2][25] = 68, - [0][1][2][0][1][25] = 46, - [0][1][2][0][3][25] = 70, - [0][1][2][0][5][25] = 127, - [0][1][2][0][6][25] = 46, - [0][1][2][0][9][25] = 127, - [0][1][2][0][8][25] = 42, - [0][1][2][0][11][25] = 40, - [0][1][2][0][2][27] = 68, - [0][1][2][0][1][27] = 46, - [0][1][2][0][3][27] = 70, - [0][1][2][0][5][27] = 127, - [0][1][2][0][6][27] = 46, - [0][1][2][0][9][27] = 127, - [0][1][2][0][8][27] = 42, - [0][1][2][0][11][27] = 40, - [0][1][2][0][2][29] = 68, - [0][1][2][0][1][29] = 46, - [0][1][2][0][3][29] = 70, - [0][1][2][0][5][29] = 127, - [0][1][2][0][6][29] = 46, - [0][1][2][0][9][29] = 127, - [0][1][2][0][8][29] = 42, - [0][1][2][0][11][29] = 40, - [0][1][2][0][2][31] = 68, - [0][1][2][0][1][31] = 46, - [0][1][2][0][3][31] = 70, - [0][1][2][0][5][31] = 68, - [0][1][2][0][6][31] = 46, - [0][1][2][0][9][31] = 46, - [0][1][2][0][8][31] = 42, - [0][1][2][0][11][31] = 40, - [0][1][2][0][2][33] = 68, - [0][1][2][0][1][33] = 46, - 
[0][1][2][0][3][33] = 70, - [0][1][2][0][5][33] = 68, - [0][1][2][0][6][33] = 46, - [0][1][2][0][9][33] = 46, - [0][1][2][0][8][33] = 42, - [0][1][2][0][11][33] = 40, - [0][1][2][0][2][35] = 64, - [0][1][2][0][1][35] = 46, - [0][1][2][0][3][35] = 70, - [0][1][2][0][5][35] = 64, - [0][1][2][0][6][35] = 46, - [0][1][2][0][9][35] = 46, - [0][1][2][0][8][35] = 42, - [0][1][2][0][11][35] = 40, - [0][1][2][0][2][37] = 68, - [0][1][2][0][1][37] = 127, - [0][1][2][0][3][37] = 70, - [0][1][2][0][5][37] = 68, - [0][1][2][0][6][37] = 46, - [0][1][2][0][9][37] = 68, - [0][1][2][0][8][37] = 42, - [0][1][2][0][11][37] = 127, - [0][1][2][0][2][38] = 76, - [0][1][2][0][1][38] = 16, - [0][1][2][0][3][38] = 127, - [0][1][2][0][5][38] = 76, - [0][1][2][0][6][38] = 16, - [0][1][2][0][9][38] = 76, - [0][1][2][0][8][38] = 42, - [0][1][2][0][11][38] = 40, - [0][1][2][0][2][40] = 76, - [0][1][2][0][1][40] = 16, - [0][1][2][0][3][40] = 127, - [0][1][2][0][5][40] = 76, - [0][1][2][0][6][40] = 16, - [0][1][2][0][9][40] = 76, - [0][1][2][0][8][40] = 42, - [0][1][2][0][11][40] = 40, - [0][1][2][0][2][42] = 76, - [0][1][2][0][1][42] = 16, - [0][1][2][0][3][42] = 127, - [0][1][2][0][5][42] = 76, - [0][1][2][0][6][42] = 16, - [0][1][2][0][9][42] = 76, - [0][1][2][0][8][42] = 42, - [0][1][2][0][11][42] = 40, - [0][1][2][0][2][44] = 76, - [0][1][2][0][1][44] = 16, - [0][1][2][0][3][44] = 127, - [0][1][2][0][5][44] = 76, - [0][1][2][0][6][44] = 16, - [0][1][2][0][9][44] = 76, - [0][1][2][0][8][44] = 42, - [0][1][2][0][11][44] = 40, - [0][1][2][0][2][46] = 76, - [0][1][2][0][1][46] = 16, - [0][1][2][0][3][46] = 127, - [0][1][2][0][5][46] = 76, - [0][1][2][0][6][46] = 16, - [0][1][2][0][9][46] = 76, - [0][1][2][0][8][46] = 42, - [0][1][2][0][11][46] = 40, - [0][1][2][1][2][0] = 68, - [0][1][2][1][1][0] = 34, - [0][1][2][1][3][0] = 50, - [0][1][2][1][5][0] = 38, - [0][1][2][1][6][0] = 34, - [0][1][2][1][9][0] = 34, - [0][1][2][1][8][0] = 6, - [0][1][2][1][11][0] = 28, - [0][1][2][1][2][2] = 68, - [0][1][2][1][1][2] = 34, - [0][1][2][1][3][2] = 50, - [0][1][2][1][5][2] = 38, - [0][1][2][1][6][2] = 34, - [0][1][2][1][9][2] = 34, - [0][1][2][1][8][2] = 6, - [0][1][2][1][11][2] = 28, - [0][1][2][1][2][4] = 68, - [0][1][2][1][1][4] = 34, - [0][1][2][1][3][4] = 50, - [0][1][2][1][5][4] = 38, - [0][1][2][1][6][4] = 34, - [0][1][2][1][9][4] = 34, - [0][1][2][1][8][4] = 6, - [0][1][2][1][11][4] = 28, - [0][1][2][1][2][6] = 68, - [0][1][2][1][1][6] = 34, - [0][1][2][1][3][6] = 50, - [0][1][2][1][5][6] = 38, - [0][1][2][1][6][6] = 34, - [0][1][2][1][9][6] = 34, - [0][1][2][1][8][6] = 6, - [0][1][2][1][11][6] = 28, - [0][1][2][1][2][8] = 68, - [0][1][2][1][1][8] = 34, - [0][1][2][1][3][8] = 50, - [0][1][2][1][5][8] = 38, - [0][1][2][1][6][8] = 34, - [0][1][2][1][9][8] = 34, - [0][1][2][1][8][8] = 30, - [0][1][2][1][11][8] = 28, - [0][1][2][1][2][10] = 68, - [0][1][2][1][1][10] = 34, - [0][1][2][1][3][10] = 50, - [0][1][2][1][5][10] = 38, - [0][1][2][1][6][10] = 34, - [0][1][2][1][9][10] = 34, - [0][1][2][1][8][10] = 30, - [0][1][2][1][11][10] = 28, - [0][1][2][1][2][12] = 68, - [0][1][2][1][1][12] = 34, - [0][1][2][1][3][12] = 50, - [0][1][2][1][5][12] = 38, - [0][1][2][1][6][12] = 34, - [0][1][2][1][9][12] = 34, - [0][1][2][1][8][12] = 30, - [0][1][2][1][11][12] = 28, - [0][1][2][1][2][14] = 68, - [0][1][2][1][1][14] = 34, - [0][1][2][1][3][14] = 50, - [0][1][2][1][5][14] = 38, - [0][1][2][1][6][14] = 34, - [0][1][2][1][9][14] = 34, - [0][1][2][1][8][14] = 30, - [0][1][2][1][11][14] = 28, - [0][1][2][1][2][15] = 68, -
[0][1][2][1][1][15] = 34, - [0][1][2][1][3][15] = 70, - [0][1][2][1][5][15] = 62, - [0][1][2][1][6][15] = 34, - [0][1][2][1][9][15] = 34, - [0][1][2][1][8][15] = 30, - [0][1][2][1][11][15] = 28, - [0][1][2][1][2][17] = 68, - [0][1][2][1][1][17] = 34, - [0][1][2][1][3][17] = 70, - [0][1][2][1][5][17] = 62, - [0][1][2][1][6][17] = 34, - [0][1][2][1][9][17] = 34, - [0][1][2][1][8][17] = 30, - [0][1][2][1][11][17] = 28, - [0][1][2][1][2][19] = 68, - [0][1][2][1][1][19] = 34, - [0][1][2][1][3][19] = 70, - [0][1][2][1][5][19] = 62, - [0][1][2][1][6][19] = 34, - [0][1][2][1][9][19] = 34, - [0][1][2][1][8][19] = 30, - [0][1][2][1][11][19] = 28, - [0][1][2][1][2][21] = 68, - [0][1][2][1][1][21] = 34, - [0][1][2][1][3][21] = 70, - [0][1][2][1][5][21] = 62, - [0][1][2][1][6][21] = 34, - [0][1][2][1][9][21] = 34, - [0][1][2][1][8][21] = 30, - [0][1][2][1][11][21] = 28, - [0][1][2][1][2][23] = 68, - [0][1][2][1][1][23] = 34, - [0][1][2][1][3][23] = 70, - [0][1][2][1][5][23] = 62, - [0][1][2][1][6][23] = 34, - [0][1][2][1][9][23] = 34, - [0][1][2][1][8][23] = 30, - [0][1][2][1][11][23] = 28, - [0][1][2][1][2][25] = 68, - [0][1][2][1][1][25] = 34, - [0][1][2][1][3][25] = 70, - [0][1][2][1][5][25] = 127, - [0][1][2][1][6][25] = 34, - [0][1][2][1][9][25] = 127, - [0][1][2][1][8][25] = 30, - [0][1][2][1][11][25] = 28, - [0][1][2][1][2][27] = 68, - [0][1][2][1][1][27] = 34, - [0][1][2][1][3][27] = 70, - [0][1][2][1][5][27] = 127, - [0][1][2][1][6][27] = 34, - [0][1][2][1][9][27] = 127, - [0][1][2][1][8][27] = 30, - [0][1][2][1][11][27] = 28, - [0][1][2][1][2][29] = 68, - [0][1][2][1][1][29] = 34, - [0][1][2][1][3][29] = 70, - [0][1][2][1][5][29] = 127, - [0][1][2][1][6][29] = 34, - [0][1][2][1][9][29] = 127, - [0][1][2][1][8][29] = 30, - [0][1][2][1][11][29] = 28, - [0][1][2][1][2][31] = 68, - [0][1][2][1][1][31] = 34, - [0][1][2][1][3][31] = 70, - [0][1][2][1][5][31] = 62, - [0][1][2][1][6][31] = 34, - [0][1][2][1][9][31] = 34, - [0][1][2][1][8][31] = 30, - [0][1][2][1][11][31] = 28, - [0][1][2][1][2][33] = 68, - [0][1][2][1][1][33] = 34, - [0][1][2][1][3][33] = 70, - [0][1][2][1][5][33] = 62, - [0][1][2][1][6][33] = 34, - [0][1][2][1][9][33] = 34, - [0][1][2][1][8][33] = 30, - [0][1][2][1][11][33] = 28, - [0][1][2][1][2][35] = 64, - [0][1][2][1][1][35] = 34, - [0][1][2][1][3][35] = 70, - [0][1][2][1][5][35] = 62, - [0][1][2][1][6][35] = 34, - [0][1][2][1][9][35] = 34, - [0][1][2][1][8][35] = 30, - [0][1][2][1][11][35] = 28, - [0][1][2][1][2][37] = 68, - [0][1][2][1][1][37] = 127, - [0][1][2][1][3][37] = 70, - [0][1][2][1][5][37] = 62, - [0][1][2][1][6][37] = 34, - [0][1][2][1][9][37] = 68, - [0][1][2][1][8][37] = 30, - [0][1][2][1][11][37] = 127, - [0][1][2][1][2][38] = 76, - [0][1][2][1][1][38] = 4, - [0][1][2][1][3][38] = 127, - [0][1][2][1][5][38] = 76, - [0][1][2][1][6][38] = 4, - [0][1][2][1][9][38] = 76, - [0][1][2][1][8][38] = 30, - [0][1][2][1][11][38] = 28, - [0][1][2][1][2][40] = 76, - [0][1][2][1][1][40] = 4, - [0][1][2][1][3][40] = 127, - [0][1][2][1][5][40] = 76, - [0][1][2][1][6][40] = 4, - [0][1][2][1][9][40] = 76, - [0][1][2][1][8][40] = 30, - [0][1][2][1][11][40] = 28, - [0][1][2][1][2][42] = 76, - [0][1][2][1][1][42] = 4, - [0][1][2][1][3][42] = 127, - [0][1][2][1][5][42] = 76, - [0][1][2][1][6][42] = 4, - [0][1][2][1][9][42] = 76, - [0][1][2][1][8][42] = 30, - [0][1][2][1][11][42] = 28, - [0][1][2][1][2][44] = 76, - [0][1][2][1][1][44] = 4, - [0][1][2][1][3][44] = 127, - [0][1][2][1][5][44] = 76, - [0][1][2][1][6][44] = 4, - [0][1][2][1][9][44] = 76, - [0][1][2][1][8][44] = 30, - [0][1][2][1][11][44] = 28,
- [0][1][2][1][2][46] = 76, - [0][1][2][1][1][46] = 4, - [0][1][2][1][3][46] = 127, - [0][1][2][1][5][46] = 76, - [0][1][2][1][6][46] = 4, - [0][1][2][1][9][46] = 76, - [0][1][2][1][8][46] = 30, - [0][1][2][1][11][46] = 28, - [1][0][2][0][2][1] = 68, - [1][0][2][0][1][1] = 64, - [1][0][2][0][3][1] = 62, - [1][0][2][0][5][1] = 64, - [1][0][2][0][6][1] = 64, - [1][0][2][0][9][1] = 64, - [1][0][2][0][8][1] = 30, - [1][0][2][0][11][1] = 52, - [1][0][2][0][2][5] = 72, - [1][0][2][0][1][5] = 64, - [1][0][2][0][3][5] = 62, - [1][0][2][0][5][5] = 64, - [1][0][2][0][6][5] = 60, - [1][0][2][0][9][5] = 64, - [1][0][2][0][8][5] = 30, - [1][0][2][0][11][5] = 52, - [1][0][2][0][2][9] = 72, - [1][0][2][0][1][9] = 64, - [1][0][2][0][3][9] = 62, - [1][0][2][0][5][9] = 64, - [1][0][2][0][6][9] = 64, - [1][0][2][0][9][9] = 64, - [1][0][2][0][8][9] = 54, - [1][0][2][0][11][9] = 52, - [1][0][2][0][2][13] = 66, - [1][0][2][0][1][13] = 64, - [1][0][2][0][3][13] = 62, - [1][0][2][0][5][13] = 64, - [1][0][2][0][6][13] = 64, - [1][0][2][0][9][13] = 64, - [1][0][2][0][8][13] = 54, - [1][0][2][0][11][13] = 52, - [1][0][2][0][2][16] = 62, - [1][0][2][0][1][16] = 64, - [1][0][2][0][3][16] = 72, - [1][0][2][0][5][16] = 62, - [1][0][2][0][6][16] = 64, - [1][0][2][0][9][16] = 64, - [1][0][2][0][8][16] = 54, - [1][0][2][0][11][16] = 52, - [1][0][2][0][2][20] = 72, - [1][0][2][0][1][20] = 64, - [1][0][2][0][3][20] = 72, - [1][0][2][0][5][20] = 72, - [1][0][2][0][6][20] = 64, - [1][0][2][0][9][20] = 64, - [1][0][2][0][8][20] = 54, - [1][0][2][0][11][20] = 52, - [1][0][2][0][2][24] = 72, - [1][0][2][0][1][24] = 64, - [1][0][2][0][3][24] = 72, - [1][0][2][0][5][24] = 127, - [1][0][2][0][6][24] = 64, - [1][0][2][0][9][24] = 127, - [1][0][2][0][8][24] = 54, - [1][0][2][0][11][24] = 52, - [1][0][2][0][2][28] = 72, - [1][0][2][0][1][28] = 64, - [1][0][2][0][3][28] = 72, - [1][0][2][0][5][28] = 127, - [1][0][2][0][6][28] = 64, - [1][0][2][0][9][28] = 127, - [1][0][2][0][8][28] = 54, - [1][0][2][0][11][28] = 52, - [1][0][2][0][2][32] = 72, - [1][0][2][0][1][32] = 64, - [1][0][2][0][3][32] = 72, - [1][0][2][0][5][32] = 72, - [1][0][2][0][6][32] = 64, - [1][0][2][0][9][32] = 64, - [1][0][2][0][8][32] = 54, - [1][0][2][0][11][32] = 52, - [1][0][2][0][2][36] = 72, - [1][0][2][0][1][36] = 127, - [1][0][2][0][3][36] = 72, - [1][0][2][0][5][36] = 72, - [1][0][2][0][6][36] = 64, - [1][0][2][0][9][36] = 72, - [1][0][2][0][8][36] = 54, - [1][0][2][0][11][36] = 127, - [1][0][2][0][2][39] = 72, - [1][0][2][0][1][39] = 28, - [1][0][2][0][3][39] = 127, - [1][0][2][0][5][39] = 72, - [1][0][2][0][6][39] = 28, - [1][0][2][0][9][39] = 72, - [1][0][2][0][8][39] = 54, - [1][0][2][0][11][39] = 52, - [1][0][2][0][2][43] = 72, - [1][0][2][0][1][43] = 28, - [1][0][2][0][3][43] = 127, - [1][0][2][0][5][43] = 72, - [1][0][2][0][6][43] = 28, - [1][0][2][0][9][43] = 72, - [1][0][2][0][8][43] = 54, - [1][0][2][0][11][43] = 52, - [1][1][2][0][2][1] = 58, - [1][1][2][0][1][1] = 52, - [1][1][2][0][3][1] = 50, - [1][1][2][0][5][1] = 52, - [1][1][2][0][6][1] = 52, - [1][1][2][0][9][1] = 52, - [1][1][2][0][8][1] = 18, - [1][1][2][0][11][1] = 40, - [1][1][2][0][2][5] = 72, - [1][1][2][0][1][5] = 52, - [1][1][2][0][3][5] = 50, - [1][1][2][0][5][5] = 52, - [1][1][2][0][6][5] = 46, - [1][1][2][0][9][5] = 52, - [1][1][2][0][8][5] = 18, - [1][1][2][0][11][5] = 40, - [1][1][2][0][2][9] = 72, - [1][1][2][0][1][9] = 52, - [1][1][2][0][3][9] = 50, - [1][1][2][0][5][9] = 52, - [1][1][2][0][6][9] = 52, - [1][1][2][0][9][9] = 52, - [1][1][2][0][8][9] = 42, -
[1][1][2][0][11][9] = 40, - [1][1][2][0][2][13] = 58, - [1][1][2][0][1][13] = 52, - [1][1][2][0][3][13] = 50, - [1][1][2][0][5][13] = 52, - [1][1][2][0][6][13] = 52, - [1][1][2][0][9][13] = 52, - [1][1][2][0][8][13] = 42, - [1][1][2][0][11][13] = 40, - [1][1][2][0][2][16] = 56, - [1][1][2][0][1][16] = 52, - [1][1][2][0][3][16] = 72, - [1][1][2][0][5][16] = 56, - [1][1][2][0][6][16] = 52, - [1][1][2][0][9][16] = 52, - [1][1][2][0][8][16] = 42, - [1][1][2][0][11][16] = 40, - [1][1][2][0][2][20] = 72, - [1][1][2][0][1][20] = 52, - [1][1][2][0][3][20] = 72, - [1][1][2][0][5][20] = 72, - [1][1][2][0][6][20] = 52, - [1][1][2][0][9][20] = 52, - [1][1][2][0][8][20] = 42, - [1][1][2][0][11][20] = 40, - [1][1][2][0][2][24] = 72, - [1][1][2][0][1][24] = 52, - [1][1][2][0][3][24] = 72, - [1][1][2][0][5][24] = 127, - [1][1][2][0][6][24] = 52, - [1][1][2][0][9][24] = 127, - [1][1][2][0][8][24] = 42, - [1][1][2][0][11][24] = 40, - [1][1][2][0][2][28] = 72, - [1][1][2][0][1][28] = 52, - [1][1][2][0][3][28] = 72, - [1][1][2][0][5][28] = 127, - [1][1][2][0][6][28] = 52, - [1][1][2][0][9][28] = 127, - [1][1][2][0][8][28] = 42, - [1][1][2][0][11][28] = 40, - [1][1][2][0][2][32] = 68, - [1][1][2][0][1][32] = 52, - [1][1][2][0][3][32] = 72, - [1][1][2][0][5][32] = 68, - [1][1][2][0][6][32] = 52, - [1][1][2][0][9][32] = 52, - [1][1][2][0][8][32] = 42, - [1][1][2][0][11][32] = 40, - [1][1][2][0][2][36] = 72, - [1][1][2][0][1][36] = 127, - [1][1][2][0][3][36] = 72, - [1][1][2][0][5][36] = 72, - [1][1][2][0][6][36] = 52, - [1][1][2][0][9][36] = 72, - [1][1][2][0][8][36] = 42, - [1][1][2][0][11][36] = 127, - [1][1][2][0][2][39] = 72, - [1][1][2][0][1][39] = 16, - [1][1][2][0][3][39] = 127, - [1][1][2][0][5][39] = 72, - [1][1][2][0][6][39] = 16, - [1][1][2][0][9][39] = 72, - [1][1][2][0][8][39] = 42, - [1][1][2][0][11][39] = 40, - [1][1][2][0][2][43] = 72, - [1][1][2][0][1][43] = 16, - [1][1][2][0][3][43] = 127, - [1][1][2][0][5][43] = 72, - [1][1][2][0][6][43] = 16, - [1][1][2][0][9][43] = 72, - [1][1][2][0][8][43] = 42, - [1][1][2][0][11][43] = 40, - [1][1][2][1][2][1] = 58, - [1][1][2][1][1][1] = 40, - [1][1][2][1][3][1] = 50, - [1][1][2][1][5][1] = 40, - [1][1][2][1][6][1] = 40, - [1][1][2][1][9][1] = 40, - [1][1][2][1][8][1] = 6, - [1][1][2][1][11][1] = 28, - [1][1][2][1][2][5] = 68, - [1][1][2][1][1][5] = 40, - [1][1][2][1][3][5] = 50, - [1][1][2][1][5][5] = 40, - [1][1][2][1][6][5] = 40, - [1][1][2][1][9][5] = 40, - [1][1][2][1][8][5] = 6, - [1][1][2][1][11][5] = 28, - [1][1][2][1][2][9] = 68, - [1][1][2][1][1][9] = 40, - [1][1][2][1][3][9] = 50, - [1][1][2][1][5][9] = 40, - [1][1][2][1][6][9] = 40, - [1][1][2][1][9][9] = 40, - [1][1][2][1][8][9] = 30, - [1][1][2][1][11][9] = 28, - [1][1][2][1][2][13] = 58, - [1][1][2][1][1][13] = 40, - [1][1][2][1][3][13] = 50, - [1][1][2][1][5][13] = 40, - [1][1][2][1][6][13] = 40, - [1][1][2][1][9][13] = 40, - [1][1][2][1][8][13] = 30, - [1][1][2][1][11][13] = 28, - [1][1][2][1][2][16] = 56, - [1][1][2][1][1][16] = 40, - [1][1][2][1][3][16] = 72, - [1][1][2][1][5][16] = 56, - [1][1][2][1][6][16] = 40, - [1][1][2][1][9][16] = 40, - [1][1][2][1][8][16] = 30, - [1][1][2][1][11][16] = 28, - [1][1][2][1][2][20] = 68, - [1][1][2][1][1][20] = 40, - [1][1][2][1][3][20] = 72, - [1][1][2][1][5][20] = 68, - [1][1][2][1][6][20] = 40, - [1][1][2][1][9][20] = 40, - [1][1][2][1][8][20] = 30, - [1][1][2][1][11][20] = 28, - [1][1][2][1][2][24] = 68, - [1][1][2][1][1][24] = 40, - [1][1][2][1][3][24] = 72, - [1][1][2][1][5][24] = 127, - [1][1][2][1][6][24] = 40, - [1][1][2][1][9][24] = 127, -
[1][1][2][1][8][24] = 30, - [1][1][2][1][11][24] = 28, - [1][1][2][1][2][28] = 68, - [1][1][2][1][1][28] = 40, - [1][1][2][1][3][28] = 72, - [1][1][2][1][5][28] = 127, - [1][1][2][1][6][28] = 40, - [1][1][2][1][9][28] = 127, - [1][1][2][1][8][28] = 30, - [1][1][2][1][11][28] = 28, - [1][1][2][1][2][32] = 68, - [1][1][2][1][1][32] = 40, - [1][1][2][1][3][32] = 72, - [1][1][2][1][5][32] = 68, - [1][1][2][1][6][32] = 40, - [1][1][2][1][9][32] = 40, - [1][1][2][1][8][32] = 30, - [1][1][2][1][11][32] = 28, - [1][1][2][1][2][36] = 68, - [1][1][2][1][1][36] = 127, - [1][1][2][1][3][36] = 72, - [1][1][2][1][5][36] = 68, - [1][1][2][1][6][36] = 40, - [1][1][2][1][9][36] = 68, - [1][1][2][1][8][36] = 30, - [1][1][2][1][11][36] = 127, - [1][1][2][1][2][39] = 72, - [1][1][2][1][1][39] = 4, - [1][1][2][1][3][39] = 127, - [1][1][2][1][5][39] = 72, - [1][1][2][1][6][39] = 4, - [1][1][2][1][9][39] = 72, - [1][1][2][1][8][39] = 30, - [1][1][2][1][11][39] = 28, - [1][1][2][1][2][43] = 72, - [1][1][2][1][1][43] = 4, - [1][1][2][1][3][43] = 127, - [1][1][2][1][5][43] = 72, - [1][1][2][1][6][43] = 4, - [1][1][2][1][9][43] = 72, - [1][1][2][1][8][43] = 30, - [1][1][2][1][11][43] = 28, - [2][0][2][0][2][3] = 64, - [2][0][2][0][1][3] = 64, - [2][0][2][0][3][3] = 64, - [2][0][2][0][5][3] = 62, - [2][0][2][0][6][3] = 64, - [2][0][2][0][9][3] = 64, - [2][0][2][0][8][3] = 30, - [2][0][2][0][11][3] = 52, - [2][0][2][0][2][11] = 64, - [2][0][2][0][1][11] = 64, - [2][0][2][0][3][11] = 64, - [2][0][2][0][5][11] = 62, - [2][0][2][0][6][11] = 64, - [2][0][2][0][9][11] = 64, - [2][0][2][0][8][11] = 54, - [2][0][2][0][11][11] = 52, - [2][0][2][0][2][18] = 62, - [2][0][2][0][1][18] = 64, - [2][0][2][0][3][18] = 72, - [2][0][2][0][5][18] = 66, - [2][0][2][0][6][18] = 64, - [2][0][2][0][9][18] = 64, - [2][0][2][0][8][18] = 54, - [2][0][2][0][11][18] = 52, - [2][0][2][0][2][26] = 72, - [2][0][2][0][1][26] = 64, - [2][0][2][0][3][26] = 72, - [2][0][2][0][5][26] = 127, - [2][0][2][0][6][26] = 64, - [2][0][2][0][9][26] = 127, - [2][0][2][0][8][26] = 54, - [2][0][2][0][11][26] = 52, - [2][0][2][0][2][34] = 72, - [2][0][2][0][1][34] = 127, - [2][0][2][0][3][34] = 72, - [2][0][2][0][5][34] = 72, - [2][0][2][0][6][34] = 64, - [2][0][2][0][9][34] = 72, - [2][0][2][0][8][34] = 54, - [2][0][2][0][11][34] = 127, - [2][0][2][0][2][41] = 72, - [2][0][2][0][1][41] = 28, - [2][0][2][0][3][41] = 127, - [2][0][2][0][5][41] = 72, - [2][0][2][0][6][41] = 28, - [2][0][2][0][9][41] = 72, - [2][0][2][0][8][41] = 54, - [2][0][2][0][11][41] = 52, - [2][1][2][0][2][3] = 56, - [2][1][2][0][1][3] = 52, - [2][1][2][0][3][3] = 52, - [2][1][2][0][5][3] = 52, - [2][1][2][0][6][3] = 52, - [2][1][2][0][9][3] = 52, - [2][1][2][0][8][3] = 18, - [2][1][2][0][11][3] = 40, - [2][1][2][0][2][11] = 56, - [2][1][2][0][1][11] = 52, - [2][1][2][0][3][11] = 52, - [2][1][2][0][5][11] = 52, - [2][1][2][0][6][11] = 52, - [2][1][2][0][9][11] = 52, - [2][1][2][0][8][11] = 42, - [2][1][2][0][11][11] = 40, - [2][1][2][0][2][18] = 56, - [2][1][2][0][1][18] = 52, - [2][1][2][0][3][18] = 72, - [2][1][2][0][5][18] = 56, - [2][1][2][0][6][18] = 52, - [2][1][2][0][9][18] = 52, - [2][1][2][0][8][18] = 42, - [2][1][2][0][11][18] = 40, - [2][1][2][0][2][26] = 72, - [2][1][2][0][1][26] = 52, - [2][1][2][0][3][26] = 72, - [2][1][2][0][5][26] = 127, - [2][1][2][0][6][26] = 52, - [2][1][2][0][9][26] = 127, - [2][1][2][0][8][26] = 42, - [2][1][2][0][11][26] = 40, - [2][1][2][0][2][34] = 72, - [2][1][2][0][1][34] = 127, - [2][1][2][0][3][34] = 72, - [2][1][2][0][5][34] = 72, - [2][1][2][0][6][34] = 52, -
[2][1][2][0][9][34] = 72, - [2][1][2][0][8][34] = 42, - [2][1][2][0][11][34] = 127, - [2][1][2][0][2][41] = 72, - [2][1][2][0][1][41] = 16, - [2][1][2][0][3][41] = 127, - [2][1][2][0][5][41] = 72, - [2][1][2][0][6][41] = 16, - [2][1][2][0][9][41] = 72, - [2][1][2][0][8][41] = 42, - [2][1][2][0][11][41] = 40, - [2][1][2][1][2][3] = 56, - [2][1][2][1][1][3] = 40, - [2][1][2][1][3][3] = 52, - [2][1][2][1][5][3] = 40, - [2][1][2][1][6][3] = 40, - [2][1][2][1][9][3] = 40, - [2][1][2][1][8][3] = 6, - [2][1][2][1][11][3] = 28, - [2][1][2][1][2][11] = 56, - [2][1][2][1][1][11] = 40, - [2][1][2][1][3][11] = 52, - [2][1][2][1][5][11] = 40, - [2][1][2][1][6][11] = 40, - [2][1][2][1][9][11] = 40, - [2][1][2][1][8][11] = 30, - [2][1][2][1][11][11] = 28, - [2][1][2][1][2][18] = 56, - [2][1][2][1][1][18] = 40, - [2][1][2][1][3][18] = 72, - [2][1][2][1][5][18] = 56, - [2][1][2][1][6][18] = 40, - [2][1][2][1][9][18] = 40, - [2][1][2][1][8][18] = 30, - [2][1][2][1][11][18] = 28, - [2][1][2][1][2][26] = 68, - [2][1][2][1][1][26] = 40, - [2][1][2][1][3][26] = 72, - [2][1][2][1][5][26] = 127, - [2][1][2][1][6][26] = 40, - [2][1][2][1][9][26] = 127, - [2][1][2][1][8][26] = 30, - [2][1][2][1][11][26] = 28, - [2][1][2][1][2][34] = 68, - [2][1][2][1][1][34] = 127, - [2][1][2][1][3][34] = 72, - [2][1][2][1][5][34] = 68, - [2][1][2][1][6][34] = 40, - [2][1][2][1][9][34] = 68, - [2][1][2][1][8][34] = 30, - [2][1][2][1][11][34] = 127, - [2][1][2][1][2][41] = 72, - [2][1][2][1][1][41] = 4, - [2][1][2][1][3][41] = 127, - [2][1][2][1][5][41] = 72, - [2][1][2][1][6][41] = 4, - [2][1][2][1][9][41] = 72, - [2][1][2][1][8][41] = 30, - [2][1][2][1][11][41] = 28, + [0][0][1][0][RTW89_WW][0] = 30, + [0][0][1][0][RTW89_WW][2] = 30, + [0][0][1][0][RTW89_WW][4] = 30, + [0][0][1][0][RTW89_WW][6] = 30, + [0][0][1][0][RTW89_WW][8] = 52, + [0][0][1][0][RTW89_WW][10] = 52, + [0][0][1][0][RTW89_WW][12] = 52, + [0][0][1][0][RTW89_WW][14] = 52, + [0][0][1][0][RTW89_WW][15] = 52, + [0][0][1][0][RTW89_WW][17] = 52, + [0][0][1][0][RTW89_WW][19] = 52, + [0][0][1][0][RTW89_WW][21] = 52, + [0][0][1][0][RTW89_WW][23] = 52, + [0][0][1][0][RTW89_WW][25] = 52, + [0][0][1][0][RTW89_WW][27] = 52, + [0][0][1][0][RTW89_WW][29] = 52, + [0][0][1][0][RTW89_WW][31] = 52, + [0][0][1][0][RTW89_WW][33] = 52, + [0][0][1][0][RTW89_WW][35] = 52, + [0][0][1][0][RTW89_WW][37] = 54, + [0][0][1][0][RTW89_WW][38] = 28, + [0][0][1][0][RTW89_WW][40] = 28, + [0][0][1][0][RTW89_WW][42] = 28, + [0][0][1][0][RTW89_WW][44] = 28, + [0][0][1][0][RTW89_WW][46] = 28, + [0][1][1][0][RTW89_WW][0] = 18, + [0][1][1][0][RTW89_WW][2] = 18, + [0][1][1][0][RTW89_WW][4] = 18, + [0][1][1][0][RTW89_WW][6] = 18, + [0][1][1][0][RTW89_WW][8] = 40, + [0][1][1][0][RTW89_WW][10] = 40, + [0][1][1][0][RTW89_WW][12] = 40, + [0][1][1][0][RTW89_WW][14] = 40, + [0][1][1][0][RTW89_WW][15] = 40, + [0][1][1][0][RTW89_WW][17] = 40, + [0][1][1][0][RTW89_WW][19] = 40, + [0][1][1][0][RTW89_WW][21] = 40, + [0][1][1][0][RTW89_WW][23] = 40, + [0][1][1][0][RTW89_WW][25] = 40, + [0][1][1][0][RTW89_WW][27] = 40, + [0][1][1][0][RTW89_WW][29] = 40, + [0][1][1][0][RTW89_WW][31] = 40, + [0][1][1][0][RTW89_WW][33] = 40, + [0][1][1][0][RTW89_WW][35] = 40, + [0][1][1][0][RTW89_WW][37] = 42, + [0][1][1][0][RTW89_WW][38] = 16, + [0][1][1][0][RTW89_WW][40] = 16, + [0][1][1][0][RTW89_WW][42] = 16, + [0][1][1][0][RTW89_WW][44] = 16, + [0][1][1][0][RTW89_WW][46] = 16, + [0][0][2][0][RTW89_WW][0] = 30, + [0][0][2][0][RTW89_WW][2] = 30, + [0][0][2][0][RTW89_WW][4] = 30, + [0][0][2][0][RTW89_WW][6] = 30, + 
[0][0][2][0][RTW89_WW][8] = 52, + [0][0][2][0][RTW89_WW][10] = 52, + [0][0][2][0][RTW89_WW][12] = 52, + [0][0][2][0][RTW89_WW][14] = 52, + [0][0][2][0][RTW89_WW][15] = 52, + [0][0][2][0][RTW89_WW][17] = 52, + [0][0][2][0][RTW89_WW][19] = 52, + [0][0][2][0][RTW89_WW][21] = 52, + [0][0][2][0][RTW89_WW][23] = 52, + [0][0][2][0][RTW89_WW][25] = 52, + [0][0][2][0][RTW89_WW][27] = 52, + [0][0][2][0][RTW89_WW][29] = 52, + [0][0][2][0][RTW89_WW][31] = 52, + [0][0][2][0][RTW89_WW][33] = 52, + [0][0][2][0][RTW89_WW][35] = 52, + [0][0][2][0][RTW89_WW][37] = 54, + [0][0][2][0][RTW89_WW][38] = 28, + [0][0][2][0][RTW89_WW][40] = 28, + [0][0][2][0][RTW89_WW][42] = 28, + [0][0][2][0][RTW89_WW][44] = 28, + [0][0][2][0][RTW89_WW][46] = 28, + [0][1][2][0][RTW89_WW][0] = 18, + [0][1][2][0][RTW89_WW][2] = 18, + [0][1][2][0][RTW89_WW][4] = 18, + [0][1][2][0][RTW89_WW][6] = 18, + [0][1][2][0][RTW89_WW][8] = 40, + [0][1][2][0][RTW89_WW][10] = 40, + [0][1][2][0][RTW89_WW][12] = 40, + [0][1][2][0][RTW89_WW][14] = 40, + [0][1][2][0][RTW89_WW][15] = 40, + [0][1][2][0][RTW89_WW][17] = 40, + [0][1][2][0][RTW89_WW][19] = 40, + [0][1][2][0][RTW89_WW][21] = 40, + [0][1][2][0][RTW89_WW][23] = 40, + [0][1][2][0][RTW89_WW][25] = 40, + [0][1][2][0][RTW89_WW][27] = 40, + [0][1][2][0][RTW89_WW][29] = 40, + [0][1][2][0][RTW89_WW][31] = 40, + [0][1][2][0][RTW89_WW][33] = 40, + [0][1][2][0][RTW89_WW][35] = 40, + [0][1][2][0][RTW89_WW][37] = 42, + [0][1][2][0][RTW89_WW][38] = 16, + [0][1][2][0][RTW89_WW][40] = 16, + [0][1][2][0][RTW89_WW][42] = 16, + [0][1][2][0][RTW89_WW][44] = 16, + [0][1][2][0][RTW89_WW][46] = 16, + [0][1][2][1][RTW89_WW][0] = 6, + [0][1][2][1][RTW89_WW][2] = 6, + [0][1][2][1][RTW89_WW][4] = 6, + [0][1][2][1][RTW89_WW][6] = 6, + [0][1][2][1][RTW89_WW][8] = 28, + [0][1][2][1][RTW89_WW][10] = 28, + [0][1][2][1][RTW89_WW][12] = 28, + [0][1][2][1][RTW89_WW][14] = 28, + [0][1][2][1][RTW89_WW][15] = 28, + [0][1][2][1][RTW89_WW][17] = 28, + [0][1][2][1][RTW89_WW][19] = 28, + [0][1][2][1][RTW89_WW][21] = 28, + [0][1][2][1][RTW89_WW][23] = 28, + [0][1][2][1][RTW89_WW][25] = 28, + [0][1][2][1][RTW89_WW][27] = 28, + [0][1][2][1][RTW89_WW][29] = 28, + [0][1][2][1][RTW89_WW][31] = 28, + [0][1][2][1][RTW89_WW][33] = 28, + [0][1][2][1][RTW89_WW][35] = 28, + [0][1][2][1][RTW89_WW][37] = 30, + [0][1][2][1][RTW89_WW][38] = 4, + [0][1][2][1][RTW89_WW][40] = 4, + [0][1][2][1][RTW89_WW][42] = 4, + [0][1][2][1][RTW89_WW][44] = 4, + [0][1][2][1][RTW89_WW][46] = 4, + [1][0][2][0][RTW89_WW][1] = 30, + [1][0][2][0][RTW89_WW][5] = 30, + [1][0][2][0][RTW89_WW][9] = 52, + [1][0][2][0][RTW89_WW][13] = 52, + [1][0][2][0][RTW89_WW][16] = 52, + [1][0][2][0][RTW89_WW][20] = 52, + [1][0][2][0][RTW89_WW][24] = 52, + [1][0][2][0][RTW89_WW][28] = 52, + [1][0][2][0][RTW89_WW][32] = 52, + [1][0][2][0][RTW89_WW][36] = 54, + [1][0][2][0][RTW89_WW][39] = 28, + [1][0][2][0][RTW89_WW][43] = 28, + [1][1][2][0][RTW89_WW][1] = 18, + [1][1][2][0][RTW89_WW][5] = 18, + [1][1][2][0][RTW89_WW][9] = 40, + [1][1][2][0][RTW89_WW][13] = 40, + [1][1][2][0][RTW89_WW][16] = 40, + [1][1][2][0][RTW89_WW][20] = 40, + [1][1][2][0][RTW89_WW][24] = 40, + [1][1][2][0][RTW89_WW][28] = 40, + [1][1][2][0][RTW89_WW][32] = 40, + [1][1][2][0][RTW89_WW][36] = 42, + [1][1][2][0][RTW89_WW][39] = 16, + [1][1][2][0][RTW89_WW][43] = 16, + [1][1][2][1][RTW89_WW][1] = 6, + [1][1][2][1][RTW89_WW][5] = 6, + [1][1][2][1][RTW89_WW][9] = 28, + [1][1][2][1][RTW89_WW][13] = 28, + [1][1][2][1][RTW89_WW][16] = 28, + [1][1][2][1][RTW89_WW][20] = 28, + [1][1][2][1][RTW89_WW][24] = 28, + 
[1][1][2][1][RTW89_WW][28] = 28, + [1][1][2][1][RTW89_WW][32] = 28, + [1][1][2][1][RTW89_WW][36] = 30, + [1][1][2][1][RTW89_WW][39] = 4, + [1][1][2][1][RTW89_WW][43] = 4, + [2][0][2][0][RTW89_WW][3] = 30, + [2][0][2][0][RTW89_WW][11] = 52, + [2][0][2][0][RTW89_WW][18] = 52, + [2][0][2][0][RTW89_WW][26] = 52, + [2][0][2][0][RTW89_WW][34] = 54, + [2][0][2][0][RTW89_WW][41] = 28, + [2][1][2][0][RTW89_WW][3] = 18, + [2][1][2][0][RTW89_WW][11] = 40, + [2][1][2][0][RTW89_WW][18] = 40, + [2][1][2][0][RTW89_WW][26] = 40, + [2][1][2][0][RTW89_WW][34] = 42, + [2][1][2][0][RTW89_WW][41] = 16, + [2][1][2][1][RTW89_WW][3] = 6, + [2][1][2][1][RTW89_WW][11] = 28, + [2][1][2][1][RTW89_WW][18] = 28, + [2][1][2][1][RTW89_WW][26] = 28, + [2][1][2][1][RTW89_WW][34] = 30, + [2][1][2][1][RTW89_WW][41] = 4, + [0][0][1][0][RTW89_FCC][0] = 76, + [0][0][1][0][RTW89_ETSI][0] = 58, + [0][0][1][0][RTW89_MKK][0] = 62, + [0][0][1][0][RTW89_IC][0] = 62, + [0][0][1][0][RTW89_KCC][0] = 76, + [0][0][1][0][RTW89_ACMA][0] = 58, + [0][0][1][0][RTW89_CHILE][0] = 30, + [0][0][1][0][RTW89_UKRAINE][0] = 52, + [0][0][1][0][RTW89_MEXICO][0] = 62, + [0][0][1][0][RTW89_CN][0] = 58, + [0][0][1][0][RTW89_QATAR][0] = 58, + [0][0][1][0][RTW89_FCC][2] = 76, + [0][0][1][0][RTW89_ETSI][2] = 58, + [0][0][1][0][RTW89_MKK][2] = 62, + [0][0][1][0][RTW89_IC][2] = 62, + [0][0][1][0][RTW89_KCC][2] = 76, + [0][0][1][0][RTW89_ACMA][2] = 58, + [0][0][1][0][RTW89_CHILE][2] = 30, + [0][0][1][0][RTW89_UKRAINE][2] = 52, + [0][0][1][0][RTW89_MEXICO][2] = 62, + [0][0][1][0][RTW89_CN][2] = 58, + [0][0][1][0][RTW89_QATAR][2] = 58, + [0][0][1][0][RTW89_FCC][4] = 76, + [0][0][1][0][RTW89_ETSI][4] = 58, + [0][0][1][0][RTW89_MKK][4] = 62, + [0][0][1][0][RTW89_IC][4] = 62, + [0][0][1][0][RTW89_KCC][4] = 76, + [0][0][1][0][RTW89_ACMA][4] = 58, + [0][0][1][0][RTW89_CHILE][4] = 30, + [0][0][1][0][RTW89_UKRAINE][4] = 52, + [0][0][1][0][RTW89_MEXICO][4] = 62, + [0][0][1][0][RTW89_CN][4] = 58, + [0][0][1][0][RTW89_QATAR][4] = 58, + [0][0][1][0][RTW89_FCC][6] = 76, + [0][0][1][0][RTW89_ETSI][6] = 58, + [0][0][1][0][RTW89_MKK][6] = 62, + [0][0][1][0][RTW89_IC][6] = 62, + [0][0][1][0][RTW89_KCC][6] = 58, + [0][0][1][0][RTW89_ACMA][6] = 58, + [0][0][1][0][RTW89_CHILE][6] = 30, + [0][0][1][0][RTW89_UKRAINE][6] = 52, + [0][0][1][0][RTW89_MEXICO][6] = 62, + [0][0][1][0][RTW89_CN][6] = 58, + [0][0][1][0][RTW89_QATAR][6] = 58, + [0][0][1][0][RTW89_FCC][8] = 76, + [0][0][1][0][RTW89_ETSI][8] = 58, + [0][0][1][0][RTW89_MKK][8] = 62, + [0][0][1][0][RTW89_IC][8] = 64, + [0][0][1][0][RTW89_KCC][8] = 76, + [0][0][1][0][RTW89_ACMA][8] = 58, + [0][0][1][0][RTW89_CHILE][8] = 54, + [0][0][1][0][RTW89_UKRAINE][8] = 52, + [0][0][1][0][RTW89_MEXICO][8] = 76, + [0][0][1][0][RTW89_CN][8] = 58, + [0][0][1][0][RTW89_QATAR][8] = 58, + [0][0][1][0][RTW89_FCC][10] = 76, + [0][0][1][0][RTW89_ETSI][10] = 58, + [0][0][1][0][RTW89_MKK][10] = 62, + [0][0][1][0][RTW89_IC][10] = 64, + [0][0][1][0][RTW89_KCC][10] = 76, + [0][0][1][0][RTW89_ACMA][10] = 58, + [0][0][1][0][RTW89_CHILE][10] = 54, + [0][0][1][0][RTW89_UKRAINE][10] = 52, + [0][0][1][0][RTW89_MEXICO][10] = 76, + [0][0][1][0][RTW89_CN][10] = 58, + [0][0][1][0][RTW89_QATAR][10] = 58, + [0][0][1][0][RTW89_FCC][12] = 76, + [0][0][1][0][RTW89_ETSI][12] = 58, + [0][0][1][0][RTW89_MKK][12] = 62, + [0][0][1][0][RTW89_IC][12] = 64, + [0][0][1][0][RTW89_KCC][12] = 76, + [0][0][1][0][RTW89_ACMA][12] = 58, + [0][0][1][0][RTW89_CHILE][12] = 54, + [0][0][1][0][RTW89_UKRAINE][12] = 52, + [0][0][1][0][RTW89_MEXICO][12] = 76, + [0][0][1][0][RTW89_CN][12] = 58, + 
[0][0][1][0][RTW89_QATAR][12] = 58, + [0][0][1][0][RTW89_FCC][14] = 76, + [0][0][1][0][RTW89_ETSI][14] = 58, + [0][0][1][0][RTW89_MKK][14] = 62, + [0][0][1][0][RTW89_IC][14] = 64, + [0][0][1][0][RTW89_KCC][14] = 76, + [0][0][1][0][RTW89_ACMA][14] = 58, + [0][0][1][0][RTW89_CHILE][14] = 54, + [0][0][1][0][RTW89_UKRAINE][14] = 52, + [0][0][1][0][RTW89_MEXICO][14] = 76, + [0][0][1][0][RTW89_CN][14] = 58, + [0][0][1][0][RTW89_QATAR][14] = 58, + [0][0][1][0][RTW89_FCC][15] = 76, + [0][0][1][0][RTW89_ETSI][15] = 58, + [0][0][1][0][RTW89_MKK][15] = 76, + [0][0][1][0][RTW89_IC][15] = 76, + [0][0][1][0][RTW89_KCC][15] = 76, + [0][0][1][0][RTW89_ACMA][15] = 58, + [0][0][1][0][RTW89_CHILE][15] = 54, + [0][0][1][0][RTW89_UKRAINE][15] = 52, + [0][0][1][0][RTW89_MEXICO][15] = 76, + [0][0][1][0][RTW89_CN][15] = 127, + [0][0][1][0][RTW89_QATAR][15] = 52, + [0][0][1][0][RTW89_FCC][17] = 76, + [0][0][1][0][RTW89_ETSI][17] = 58, + [0][0][1][0][RTW89_MKK][17] = 76, + [0][0][1][0][RTW89_IC][17] = 76, + [0][0][1][0][RTW89_KCC][17] = 76, + [0][0][1][0][RTW89_ACMA][17] = 58, + [0][0][1][0][RTW89_CHILE][17] = 54, + [0][0][1][0][RTW89_UKRAINE][17] = 52, + [0][0][1][0][RTW89_MEXICO][17] = 76, + [0][0][1][0][RTW89_CN][17] = 127, + [0][0][1][0][RTW89_QATAR][17] = 52, + [0][0][1][0][RTW89_FCC][19] = 76, + [0][0][1][0][RTW89_ETSI][19] = 58, + [0][0][1][0][RTW89_MKK][19] = 76, + [0][0][1][0][RTW89_IC][19] = 76, + [0][0][1][0][RTW89_KCC][19] = 76, + [0][0][1][0][RTW89_ACMA][19] = 58, + [0][0][1][0][RTW89_CHILE][19] = 54, + [0][0][1][0][RTW89_UKRAINE][19] = 52, + [0][0][1][0][RTW89_MEXICO][19] = 76, + [0][0][1][0][RTW89_CN][19] = 127, + [0][0][1][0][RTW89_QATAR][19] = 52, + [0][0][1][0][RTW89_FCC][21] = 76, + [0][0][1][0][RTW89_ETSI][21] = 58, + [0][0][1][0][RTW89_MKK][21] = 76, + [0][0][1][0][RTW89_IC][21] = 76, + [0][0][1][0][RTW89_KCC][21] = 76, + [0][0][1][0][RTW89_ACMA][21] = 58, + [0][0][1][0][RTW89_CHILE][21] = 54, + [0][0][1][0][RTW89_UKRAINE][21] = 52, + [0][0][1][0][RTW89_MEXICO][21] = 76, + [0][0][1][0][RTW89_CN][21] = 127, + [0][0][1][0][RTW89_QATAR][21] = 52, + [0][0][1][0][RTW89_FCC][23] = 76, + [0][0][1][0][RTW89_ETSI][23] = 58, + [0][0][1][0][RTW89_MKK][23] = 76, + [0][0][1][0][RTW89_IC][23] = 76, + [0][0][1][0][RTW89_KCC][23] = 76, + [0][0][1][0][RTW89_ACMA][23] = 58, + [0][0][1][0][RTW89_CHILE][23] = 54, + [0][0][1][0][RTW89_UKRAINE][23] = 52, + [0][0][1][0][RTW89_MEXICO][23] = 76, + [0][0][1][0][RTW89_CN][23] = 127, + [0][0][1][0][RTW89_QATAR][23] = 52, + [0][0][1][0][RTW89_FCC][25] = 76, + [0][0][1][0][RTW89_ETSI][25] = 58, + [0][0][1][0][RTW89_MKK][25] = 76, + [0][0][1][0][RTW89_IC][25] = 127, + [0][0][1][0][RTW89_KCC][25] = 76, + [0][0][1][0][RTW89_ACMA][25] = 127, + [0][0][1][0][RTW89_CHILE][25] = 54, + [0][0][1][0][RTW89_UKRAINE][25] = 52, + [0][0][1][0][RTW89_MEXICO][25] = 76, + [0][0][1][0][RTW89_CN][25] = 127, + [0][0][1][0][RTW89_QATAR][25] = 52, + [0][0][1][0][RTW89_FCC][27] = 76, + [0][0][1][0][RTW89_ETSI][27] = 58, + [0][0][1][0][RTW89_MKK][27] = 76, + [0][0][1][0][RTW89_IC][27] = 127, + [0][0][1][0][RTW89_KCC][27] = 76, + [0][0][1][0][RTW89_ACMA][27] = 127, + [0][0][1][0][RTW89_CHILE][27] = 54, + [0][0][1][0][RTW89_UKRAINE][27] = 52, + [0][0][1][0][RTW89_MEXICO][27] = 76, + [0][0][1][0][RTW89_CN][27] = 127, + [0][0][1][0][RTW89_QATAR][27] = 52, + [0][0][1][0][RTW89_FCC][29] = 76, + [0][0][1][0][RTW89_ETSI][29] = 58, + [0][0][1][0][RTW89_MKK][29] = 76, + [0][0][1][0][RTW89_IC][29] = 127, + [0][0][1][0][RTW89_KCC][29] = 76, + [0][0][1][0][RTW89_ACMA][29] = 127, +
[0][0][1][0][RTW89_CHILE][29] = 54, + [0][0][1][0][RTW89_UKRAINE][29] = 52, + [0][0][1][0][RTW89_MEXICO][29] = 76, + [0][0][1][0][RTW89_CN][29] = 127, + [0][0][1][0][RTW89_QATAR][29] = 52, + [0][0][1][0][RTW89_FCC][31] = 76, + [0][0][1][0][RTW89_ETSI][31] = 58, + [0][0][1][0][RTW89_MKK][31] = 76, + [0][0][1][0][RTW89_IC][31] = 76, + [0][0][1][0][RTW89_KCC][31] = 76, + [0][0][1][0][RTW89_ACMA][31] = 58, + [0][0][1][0][RTW89_CHILE][31] = 54, + [0][0][1][0][RTW89_UKRAINE][31] = 52, + [0][0][1][0][RTW89_MEXICO][31] = 76, + [0][0][1][0][RTW89_CN][31] = 127, + [0][0][1][0][RTW89_QATAR][31] = 52, + [0][0][1][0][RTW89_FCC][33] = 76, + [0][0][1][0][RTW89_ETSI][33] = 58, + [0][0][1][0][RTW89_MKK][33] = 76, + [0][0][1][0][RTW89_IC][33] = 76, + [0][0][1][0][RTW89_KCC][33] = 76, + [0][0][1][0][RTW89_ACMA][33] = 58, + [0][0][1][0][RTW89_CHILE][33] = 54, + [0][0][1][0][RTW89_UKRAINE][33] = 52, + [0][0][1][0][RTW89_MEXICO][33] = 76, + [0][0][1][0][RTW89_CN][33] = 127, + [0][0][1][0][RTW89_QATAR][33] = 52, + [0][0][1][0][RTW89_FCC][35] = 74, + [0][0][1][0][RTW89_ETSI][35] = 58, + [0][0][1][0][RTW89_MKK][35] = 76, + [0][0][1][0][RTW89_IC][35] = 74, + [0][0][1][0][RTW89_KCC][35] = 76, + [0][0][1][0][RTW89_ACMA][35] = 58, + [0][0][1][0][RTW89_CHILE][35] = 54, + [0][0][1][0][RTW89_UKRAINE][35] = 52, + [0][0][1][0][RTW89_MEXICO][35] = 74, + [0][0][1][0][RTW89_CN][35] = 127, + [0][0][1][0][RTW89_QATAR][35] = 52, + [0][0][1][0][RTW89_FCC][37] = 76, + [0][0][1][0][RTW89_ETSI][37] = 127, + [0][0][1][0][RTW89_MKK][37] = 76, + [0][0][1][0][RTW89_IC][37] = 76, + [0][0][1][0][RTW89_KCC][37] = 76, + [0][0][1][0][RTW89_ACMA][37] = 76, + [0][0][1][0][RTW89_CHILE][37] = 54, + [0][0][1][0][RTW89_UKRAINE][37] = 127, + [0][0][1][0][RTW89_MEXICO][37] = 76, + [0][0][1][0][RTW89_CN][37] = 127, + [0][0][1][0][RTW89_QATAR][37] = 127, + [0][0][1][0][RTW89_FCC][38] = 76, + [0][0][1][0][RTW89_ETSI][38] = 28, + [0][0][1][0][RTW89_MKK][38] = 127, + [0][0][1][0][RTW89_IC][38] = 76, + [0][0][1][0][RTW89_KCC][38] = 76, + [0][0][1][0][RTW89_ACMA][38] = 76, + [0][0][1][0][RTW89_CHILE][38] = 54, + [0][0][1][0][RTW89_UKRAINE][38] = 28, + [0][0][1][0][RTW89_MEXICO][38] = 76, + [0][0][1][0][RTW89_CN][38] = 72, + [0][0][1][0][RTW89_QATAR][38] = 28, + [0][0][1][0][RTW89_FCC][40] = 76, + [0][0][1][0][RTW89_ETSI][40] = 28, + [0][0][1][0][RTW89_MKK][40] = 127, + [0][0][1][0][RTW89_IC][40] = 76, + [0][0][1][0][RTW89_KCC][40] = 76, + [0][0][1][0][RTW89_ACMA][40] = 76, + [0][0][1][0][RTW89_CHILE][40] = 54, + [0][0][1][0][RTW89_UKRAINE][40] = 28, + [0][0][1][0][RTW89_MEXICO][40] = 76, + [0][0][1][0][RTW89_CN][40] = 76, + [0][0][1][0][RTW89_QATAR][40] = 28, + [0][0][1][0][RTW89_FCC][42] = 76, + [0][0][1][0][RTW89_ETSI][42] = 28, + [0][0][1][0][RTW89_MKK][42] = 127, + [0][0][1][0][RTW89_IC][42] = 76, + [0][0][1][0][RTW89_KCC][42] = 76, + [0][0][1][0][RTW89_ACMA][42] = 76, + [0][0][1][0][RTW89_CHILE][42] = 54, + [0][0][1][0][RTW89_UKRAINE][42] = 28, + [0][0][1][0][RTW89_MEXICO][42] = 76, + [0][0][1][0][RTW89_CN][42] = 76, + [0][0][1][0][RTW89_QATAR][42] = 28, + [0][0][1][0][RTW89_FCC][44] = 76, + [0][0][1][0][RTW89_ETSI][44] = 28, + [0][0][1][0][RTW89_MKK][44] = 127, + [0][0][1][0][RTW89_IC][44] = 76, + [0][0][1][0][RTW89_KCC][44] = 76, + [0][0][1][0][RTW89_ACMA][44] = 76, + [0][0][1][0][RTW89_CHILE][44] = 54, + [0][0][1][0][RTW89_UKRAINE][44] = 28, + [0][0][1][0][RTW89_MEXICO][44] = 76, + [0][0][1][0][RTW89_CN][44] = 76, + [0][0][1][0][RTW89_QATAR][44] = 28, + [0][0][1][0][RTW89_FCC][46] = 76, + [0][0][1][0][RTW89_ETSI][46] = 28, + [0][0][1][0][RTW89_MKK][46] = 127, +
[0][0][1][0][RTW89_IC][46] = 76, + [0][0][1][0][RTW89_KCC][46] = 76, + [0][0][1][0][RTW89_ACMA][46] = 76, + [0][0][1][0][RTW89_CHILE][46] = 54, + [0][0][1][0][RTW89_UKRAINE][46] = 28, + [0][0][1][0][RTW89_MEXICO][46] = 76, + [0][0][1][0][RTW89_CN][46] = 76, + [0][0][1][0][RTW89_QATAR][46] = 28, + [0][1][1][0][RTW89_FCC][0] = 68, + [0][1][1][0][RTW89_ETSI][0] = 46, + [0][1][1][0][RTW89_MKK][0] = 50, + [0][1][1][0][RTW89_IC][0] = 40, + [0][1][1][0][RTW89_KCC][0] = 72, + [0][1][1][0][RTW89_ACMA][0] = 46, + [0][1][1][0][RTW89_CHILE][0] = 18, + [0][1][1][0][RTW89_UKRAINE][0] = 40, + [0][1][1][0][RTW89_MEXICO][0] = 50, + [0][1][1][0][RTW89_CN][0] = 46, + [0][1][1][0][RTW89_QATAR][0] = 46, + [0][1][1][0][RTW89_FCC][2] = 68, + [0][1][1][0][RTW89_ETSI][2] = 46, + [0][1][1][0][RTW89_MKK][2] = 50, + [0][1][1][0][RTW89_IC][2] = 40, + [0][1][1][0][RTW89_KCC][2] = 72, + [0][1][1][0][RTW89_ACMA][2] = 46, + [0][1][1][0][RTW89_CHILE][2] = 18, + [0][1][1][0][RTW89_UKRAINE][2] = 40, + [0][1][1][0][RTW89_MEXICO][2] = 50, + [0][1][1][0][RTW89_CN][2] = 46, + [0][1][1][0][RTW89_QATAR][2] = 46, + [0][1][1][0][RTW89_FCC][4] = 68, + [0][1][1][0][RTW89_ETSI][4] = 46, + [0][1][1][0][RTW89_MKK][4] = 50, + [0][1][1][0][RTW89_IC][4] = 40, + [0][1][1][0][RTW89_KCC][4] = 72, + [0][1][1][0][RTW89_ACMA][4] = 46, + [0][1][1][0][RTW89_CHILE][4] = 18, + [0][1][1][0][RTW89_UKRAINE][4] = 40, + [0][1][1][0][RTW89_MEXICO][4] = 50, + [0][1][1][0][RTW89_CN][4] = 46, + [0][1][1][0][RTW89_QATAR][4] = 46, + [0][1][1][0][RTW89_FCC][6] = 68, + [0][1][1][0][RTW89_ETSI][6] = 46, + [0][1][1][0][RTW89_MKK][6] = 50, + [0][1][1][0][RTW89_IC][6] = 40, + [0][1][1][0][RTW89_KCC][6] = 44, + [0][1][1][0][RTW89_ACMA][6] = 46, + [0][1][1][0][RTW89_CHILE][6] = 18, + [0][1][1][0][RTW89_UKRAINE][6] = 40, + [0][1][1][0][RTW89_MEXICO][6] = 50, + [0][1][1][0][RTW89_CN][6] = 46, + [0][1][1][0][RTW89_QATAR][6] = 46, + [0][1][1][0][RTW89_FCC][8] = 68, + [0][1][1][0][RTW89_ETSI][8] = 46, + [0][1][1][0][RTW89_MKK][8] = 50, + [0][1][1][0][RTW89_IC][8] = 52, + [0][1][1][0][RTW89_KCC][8] = 72, + [0][1][1][0][RTW89_ACMA][8] = 46, + [0][1][1][0][RTW89_CHILE][8] = 42, + [0][1][1][0][RTW89_UKRAINE][8] = 40, + [0][1][1][0][RTW89_MEXICO][8] = 68, + [0][1][1][0][RTW89_CN][8] = 46, + [0][1][1][0][RTW89_QATAR][8] = 46, + [0][1][1][0][RTW89_FCC][10] = 68, + [0][1][1][0][RTW89_ETSI][10] = 46, + [0][1][1][0][RTW89_MKK][10] = 50, + [0][1][1][0][RTW89_IC][10] = 52, + [0][1][1][0][RTW89_KCC][10] = 72, + [0][1][1][0][RTW89_ACMA][10] = 46, + [0][1][1][0][RTW89_CHILE][10] = 42, + [0][1][1][0][RTW89_UKRAINE][10] = 40, + [0][1][1][0][RTW89_MEXICO][10] = 68, + [0][1][1][0][RTW89_CN][10] = 46, + [0][1][1][0][RTW89_QATAR][10] = 46, + [0][1][1][0][RTW89_FCC][12] = 68, + [0][1][1][0][RTW89_ETSI][12] = 46, + [0][1][1][0][RTW89_MKK][12] = 50, + [0][1][1][0][RTW89_IC][12] = 52, + [0][1][1][0][RTW89_KCC][12] = 72, + [0][1][1][0][RTW89_ACMA][12] = 46, + [0][1][1][0][RTW89_CHILE][12] = 42, + [0][1][1][0][RTW89_UKRAINE][12] = 40, + [0][1][1][0][RTW89_MEXICO][12] = 68, + [0][1][1][0][RTW89_CN][12] = 46, + [0][1][1][0][RTW89_QATAR][12] = 46, + [0][1][1][0][RTW89_FCC][14] = 68, + [0][1][1][0][RTW89_ETSI][14] = 46, + [0][1][1][0][RTW89_MKK][14] = 50, + [0][1][1][0][RTW89_IC][14] = 52, + [0][1][1][0][RTW89_KCC][14] = 72, + [0][1][1][0][RTW89_ACMA][14] = 46, + [0][1][1][0][RTW89_CHILE][14] = 42, + [0][1][1][0][RTW89_UKRAINE][14] = 40, + [0][1][1][0][RTW89_MEXICO][14] = 68, + [0][1][1][0][RTW89_CN][14] = 46, + [0][1][1][0][RTW89_QATAR][14] = 46, + [0][1][1][0][RTW89_FCC][15] = 68, + 
[0][1][1][0][RTW89_ETSI][15] = 46, + [0][1][1][0][RTW89_MKK][15] = 70, + [0][1][1][0][RTW89_IC][15] = 68, + [0][1][1][0][RTW89_KCC][15] = 72, + [0][1][1][0][RTW89_ACMA][15] = 46, + [0][1][1][0][RTW89_CHILE][15] = 42, + [0][1][1][0][RTW89_UKRAINE][15] = 40, + [0][1][1][0][RTW89_MEXICO][15] = 68, + [0][1][1][0][RTW89_CN][15] = 127, + [0][1][1][0][RTW89_QATAR][15] = 40, + [0][1][1][0][RTW89_FCC][17] = 68, + [0][1][1][0][RTW89_ETSI][17] = 46, + [0][1][1][0][RTW89_MKK][17] = 70, + [0][1][1][0][RTW89_IC][17] = 68, + [0][1][1][0][RTW89_KCC][17] = 72, + [0][1][1][0][RTW89_ACMA][17] = 46, + [0][1][1][0][RTW89_CHILE][17] = 42, + [0][1][1][0][RTW89_UKRAINE][17] = 40, + [0][1][1][0][RTW89_MEXICO][17] = 68, + [0][1][1][0][RTW89_CN][17] = 127, + [0][1][1][0][RTW89_QATAR][17] = 40, + [0][1][1][0][RTW89_FCC][19] = 68, + [0][1][1][0][RTW89_ETSI][19] = 46, + [0][1][1][0][RTW89_MKK][19] = 70, + [0][1][1][0][RTW89_IC][19] = 68, + [0][1][1][0][RTW89_KCC][19] = 72, + [0][1][1][0][RTW89_ACMA][19] = 46, + [0][1][1][0][RTW89_CHILE][19] = 42, + [0][1][1][0][RTW89_UKRAINE][19] = 40, + [0][1][1][0][RTW89_MEXICO][19] = 68, + [0][1][1][0][RTW89_CN][19] = 127, + [0][1][1][0][RTW89_QATAR][19] = 40, + [0][1][1][0][RTW89_FCC][21] = 68, + [0][1][1][0][RTW89_ETSI][21] = 46, + [0][1][1][0][RTW89_MKK][21] = 70, + [0][1][1][0][RTW89_IC][21] = 68, + [0][1][1][0][RTW89_KCC][21] = 72, + [0][1][1][0][RTW89_ACMA][21] = 46, + [0][1][1][0][RTW89_CHILE][21] = 42, + [0][1][1][0][RTW89_UKRAINE][21] = 40, + [0][1][1][0][RTW89_MEXICO][21] = 68, + [0][1][1][0][RTW89_CN][21] = 127, + [0][1][1][0][RTW89_QATAR][21] = 40, + [0][1][1][0][RTW89_FCC][23] = 68, + [0][1][1][0][RTW89_ETSI][23] = 46, + [0][1][1][0][RTW89_MKK][23] = 70, + [0][1][1][0][RTW89_IC][23] = 68, + [0][1][1][0][RTW89_KCC][23] = 72, + [0][1][1][0][RTW89_ACMA][23] = 46, + [0][1][1][0][RTW89_CHILE][23] = 42, + [0][1][1][0][RTW89_UKRAINE][23] = 40, + [0][1][1][0][RTW89_MEXICO][23] = 68, + [0][1][1][0][RTW89_CN][23] = 127, + [0][1][1][0][RTW89_QATAR][23] = 40, + [0][1][1][0][RTW89_FCC][25] = 68, + [0][1][1][0][RTW89_ETSI][25] = 46, + [0][1][1][0][RTW89_MKK][25] = 70, + [0][1][1][0][RTW89_IC][25] = 127, + [0][1][1][0][RTW89_KCC][25] = 72, + [0][1][1][0][RTW89_ACMA][25] = 127, + [0][1][1][0][RTW89_CHILE][25] = 42, + [0][1][1][0][RTW89_UKRAINE][25] = 40, + [0][1][1][0][RTW89_MEXICO][25] = 68, + [0][1][1][0][RTW89_CN][25] = 127, + [0][1][1][0][RTW89_QATAR][25] = 40, + [0][1][1][0][RTW89_FCC][27] = 68, + [0][1][1][0][RTW89_ETSI][27] = 46, + [0][1][1][0][RTW89_MKK][27] = 70, + [0][1][1][0][RTW89_IC][27] = 127, + [0][1][1][0][RTW89_KCC][27] = 72, + [0][1][1][0][RTW89_ACMA][27] = 127, + [0][1][1][0][RTW89_CHILE][27] = 42, + [0][1][1][0][RTW89_UKRAINE][27] = 40, + [0][1][1][0][RTW89_MEXICO][27] = 68, + [0][1][1][0][RTW89_CN][27] = 127, + [0][1][1][0][RTW89_QATAR][27] = 40, + [0][1][1][0][RTW89_FCC][29] = 68, + [0][1][1][0][RTW89_ETSI][29] = 46, + [0][1][1][0][RTW89_MKK][29] = 70, + [0][1][1][0][RTW89_IC][29] = 127, + [0][1][1][0][RTW89_KCC][29] = 72, + [0][1][1][0][RTW89_ACMA][29] = 127, + [0][1][1][0][RTW89_CHILE][29] = 42, + [0][1][1][0][RTW89_UKRAINE][29] = 40, + [0][1][1][0][RTW89_MEXICO][29] = 68, + [0][1][1][0][RTW89_CN][29] = 127, + [0][1][1][0][RTW89_QATAR][29] = 40, + [0][1][1][0][RTW89_FCC][31] = 68, + [0][1][1][0][RTW89_ETSI][31] = 46, + [0][1][1][0][RTW89_MKK][31] = 70, + [0][1][1][0][RTW89_IC][31] = 68, + [0][1][1][0][RTW89_KCC][31] = 72, + [0][1][1][0][RTW89_ACMA][31] = 46, + [0][1][1][0][RTW89_CHILE][31] = 42, + [0][1][1][0][RTW89_UKRAINE][31] = 40, + 
[0][1][1][0][RTW89_MEXICO][31] = 68, + [0][1][1][0][RTW89_CN][31] = 127, + [0][1][1][0][RTW89_QATAR][31] = 40, + [0][1][1][0][RTW89_FCC][33] = 68, + [0][1][1][0][RTW89_ETSI][33] = 46, + [0][1][1][0][RTW89_MKK][33] = 70, + [0][1][1][0][RTW89_IC][33] = 68, + [0][1][1][0][RTW89_KCC][33] = 72, + [0][1][1][0][RTW89_ACMA][33] = 46, + [0][1][1][0][RTW89_CHILE][33] = 42, + [0][1][1][0][RTW89_UKRAINE][33] = 40, + [0][1][1][0][RTW89_MEXICO][33] = 68, + [0][1][1][0][RTW89_CN][33] = 127, + [0][1][1][0][RTW89_QATAR][33] = 40, + [0][1][1][0][RTW89_FCC][35] = 66, + [0][1][1][0][RTW89_ETSI][35] = 46, + [0][1][1][0][RTW89_MKK][35] = 70, + [0][1][1][0][RTW89_IC][35] = 66, + [0][1][1][0][RTW89_KCC][35] = 72, + [0][1][1][0][RTW89_ACMA][35] = 46, + [0][1][1][0][RTW89_CHILE][35] = 42, + [0][1][1][0][RTW89_UKRAINE][35] = 40, + [0][1][1][0][RTW89_MEXICO][35] = 66, + [0][1][1][0][RTW89_CN][35] = 127, + [0][1][1][0][RTW89_QATAR][35] = 40, + [0][1][1][0][RTW89_FCC][37] = 68, + [0][1][1][0][RTW89_ETSI][37] = 127, + [0][1][1][0][RTW89_MKK][37] = 70, + [0][1][1][0][RTW89_IC][37] = 68, + [0][1][1][0][RTW89_KCC][37] = 72, + [0][1][1][0][RTW89_ACMA][37] = 68, + [0][1][1][0][RTW89_CHILE][37] = 42, + [0][1][1][0][RTW89_UKRAINE][37] = 127, + [0][1][1][0][RTW89_MEXICO][37] = 68, + [0][1][1][0][RTW89_CN][37] = 127, + [0][1][1][0][RTW89_QATAR][37] = 127, + [0][1][1][0][RTW89_FCC][38] = 76, + [0][1][1][0][RTW89_ETSI][38] = 16, + [0][1][1][0][RTW89_MKK][38] = 127, + [0][1][1][0][RTW89_IC][38] = 76, + [0][1][1][0][RTW89_KCC][38] = 72, + [0][1][1][0][RTW89_ACMA][38] = 76, + [0][1][1][0][RTW89_CHILE][38] = 42, + [0][1][1][0][RTW89_UKRAINE][38] = 16, + [0][1][1][0][RTW89_MEXICO][38] = 76, + [0][1][1][0][RTW89_CN][38] = 72, + [0][1][1][0][RTW89_QATAR][38] = 16, + [0][1][1][0][RTW89_FCC][40] = 76, + [0][1][1][0][RTW89_ETSI][40] = 16, + [0][1][1][0][RTW89_MKK][40] = 127, + [0][1][1][0][RTW89_IC][40] = 76, + [0][1][1][0][RTW89_KCC][40] = 72, + [0][1][1][0][RTW89_ACMA][40] = 76, + [0][1][1][0][RTW89_CHILE][40] = 42, + [0][1][1][0][RTW89_UKRAINE][40] = 16, + [0][1][1][0][RTW89_MEXICO][40] = 76, + [0][1][1][0][RTW89_CN][40] = 76, + [0][1][1][0][RTW89_QATAR][40] = 16, + [0][1][1][0][RTW89_FCC][42] = 76, + [0][1][1][0][RTW89_ETSI][42] = 16, + [0][1][1][0][RTW89_MKK][42] = 127, + [0][1][1][0][RTW89_IC][42] = 76, + [0][1][1][0][RTW89_KCC][42] = 72, + [0][1][1][0][RTW89_ACMA][42] = 76, + [0][1][1][0][RTW89_CHILE][42] = 42, + [0][1][1][0][RTW89_UKRAINE][42] = 16, + [0][1][1][0][RTW89_MEXICO][42] = 76, + [0][1][1][0][RTW89_CN][42] = 76, + [0][1][1][0][RTW89_QATAR][42] = 16, + [0][1][1][0][RTW89_FCC][44] = 76, + [0][1][1][0][RTW89_ETSI][44] = 16, + [0][1][1][0][RTW89_MKK][44] = 127, + [0][1][1][0][RTW89_IC][44] = 76, + [0][1][1][0][RTW89_KCC][44] = 72, + [0][1][1][0][RTW89_ACMA][44] = 76, + [0][1][1][0][RTW89_CHILE][44] = 42, + [0][1][1][0][RTW89_UKRAINE][44] = 16, + [0][1][1][0][RTW89_MEXICO][44] = 76, + [0][1][1][0][RTW89_CN][44] = 76, + [0][1][1][0][RTW89_QATAR][44] = 16, + [0][1][1][0][RTW89_FCC][46] = 76, + [0][1][1][0][RTW89_ETSI][46] = 16, + [0][1][1][0][RTW89_MKK][46] = 127, + [0][1][1][0][RTW89_IC][46] = 76, + [0][1][1][0][RTW89_KCC][46] = 72, + [0][1][1][0][RTW89_ACMA][46] = 76, + [0][1][1][0][RTW89_CHILE][46] = 42, + [0][1][1][0][RTW89_UKRAINE][46] = 16, + [0][1][1][0][RTW89_MEXICO][46] = 76, + [0][1][1][0][RTW89_CN][46] = 76, + [0][1][1][0][RTW89_QATAR][46] = 16, + [0][0][2][0][RTW89_FCC][0] = 76, + [0][0][2][0][RTW89_ETSI][0] = 58, + [0][0][2][0][RTW89_MKK][0] = 62, + [0][0][2][0][RTW89_IC][0] = 62, + [0][0][2][0][RTW89_KCC][0] = 76, + 
[0][0][2][0][RTW89_ACMA][0] = 58, + [0][0][2][0][RTW89_CHILE][0] = 30, + [0][0][2][0][RTW89_UKRAINE][0] = 52, + [0][0][2][0][RTW89_MEXICO][0] = 62, + [0][0][2][0][RTW89_CN][0] = 58, + [0][0][2][0][RTW89_QATAR][0] = 58, + [0][0][2][0][RTW89_FCC][2] = 76, + [0][0][2][0][RTW89_ETSI][2] = 58, + [0][0][2][0][RTW89_MKK][2] = 62, + [0][0][2][0][RTW89_IC][2] = 62, + [0][0][2][0][RTW89_KCC][2] = 76, + [0][0][2][0][RTW89_ACMA][2] = 58, + [0][0][2][0][RTW89_CHILE][2] = 30, + [0][0][2][0][RTW89_UKRAINE][2] = 52, + [0][0][2][0][RTW89_MEXICO][2] = 62, + [0][0][2][0][RTW89_CN][2] = 58, + [0][0][2][0][RTW89_QATAR][2] = 58, + [0][0][2][0][RTW89_FCC][4] = 76, + [0][0][2][0][RTW89_ETSI][4] = 58, + [0][0][2][0][RTW89_MKK][4] = 62, + [0][0][2][0][RTW89_IC][4] = 62, + [0][0][2][0][RTW89_KCC][4] = 76, + [0][0][2][0][RTW89_ACMA][4] = 58, + [0][0][2][0][RTW89_CHILE][4] = 30, + [0][0][2][0][RTW89_UKRAINE][4] = 52, + [0][0][2][0][RTW89_MEXICO][4] = 62, + [0][0][2][0][RTW89_CN][4] = 58, + [0][0][2][0][RTW89_QATAR][4] = 58, + [0][0][2][0][RTW89_FCC][6] = 76, + [0][0][2][0][RTW89_ETSI][6] = 58, + [0][0][2][0][RTW89_MKK][6] = 62, + [0][0][2][0][RTW89_IC][6] = 62, + [0][0][2][0][RTW89_KCC][6] = 58, + [0][0][2][0][RTW89_ACMA][6] = 58, + [0][0][2][0][RTW89_CHILE][6] = 30, + [0][0][2][0][RTW89_UKRAINE][6] = 52, + [0][0][2][0][RTW89_MEXICO][6] = 62, + [0][0][2][0][RTW89_CN][6] = 58, + [0][0][2][0][RTW89_QATAR][6] = 58, + [0][0][2][0][RTW89_FCC][8] = 76, + [0][0][2][0][RTW89_ETSI][8] = 58, + [0][0][2][0][RTW89_MKK][8] = 62, + [0][0][2][0][RTW89_IC][8] = 64, + [0][0][2][0][RTW89_KCC][8] = 76, + [0][0][2][0][RTW89_ACMA][8] = 58, + [0][0][2][0][RTW89_CHILE][8] = 54, + [0][0][2][0][RTW89_UKRAINE][8] = 52, + [0][0][2][0][RTW89_MEXICO][8] = 76, + [0][0][2][0][RTW89_CN][8] = 58, + [0][0][2][0][RTW89_QATAR][8] = 58, + [0][0][2][0][RTW89_FCC][10] = 76, + [0][0][2][0][RTW89_ETSI][10] = 58, + [0][0][2][0][RTW89_MKK][10] = 62, + [0][0][2][0][RTW89_IC][10] = 64, + [0][0][2][0][RTW89_KCC][10] = 76, + [0][0][2][0][RTW89_ACMA][10] = 58, + [0][0][2][0][RTW89_CHILE][10] = 54, + [0][0][2][0][RTW89_UKRAINE][10] = 52, + [0][0][2][0][RTW89_MEXICO][10] = 76, + [0][0][2][0][RTW89_CN][10] = 58, + [0][0][2][0][RTW89_QATAR][10] = 58, + [0][0][2][0][RTW89_FCC][12] = 76, + [0][0][2][0][RTW89_ETSI][12] = 58, + [0][0][2][0][RTW89_MKK][12] = 62, + [0][0][2][0][RTW89_IC][12] = 64, + [0][0][2][0][RTW89_KCC][12] = 76, + [0][0][2][0][RTW89_ACMA][12] = 58, + [0][0][2][0][RTW89_CHILE][12] = 54, + [0][0][2][0][RTW89_UKRAINE][12] = 52, + [0][0][2][0][RTW89_MEXICO][12] = 76, + [0][0][2][0][RTW89_CN][12] = 58, + [0][0][2][0][RTW89_QATAR][12] = 58, + [0][0][2][0][RTW89_FCC][14] = 76, + [0][0][2][0][RTW89_ETSI][14] = 58, + [0][0][2][0][RTW89_MKK][14] = 62, + [0][0][2][0][RTW89_IC][14] = 64, + [0][0][2][0][RTW89_KCC][14] = 76, + [0][0][2][0][RTW89_ACMA][14] = 58, + [0][0][2][0][RTW89_CHILE][14] = 54, + [0][0][2][0][RTW89_UKRAINE][14] = 52, + [0][0][2][0][RTW89_MEXICO][14] = 76, + [0][0][2][0][RTW89_CN][14] = 58, + [0][0][2][0][RTW89_QATAR][14] = 58, + [0][0][2][0][RTW89_FCC][15] = 74, + [0][0][2][0][RTW89_ETSI][15] = 58, + [0][0][2][0][RTW89_MKK][15] = 76, + [0][0][2][0][RTW89_IC][15] = 74, + [0][0][2][0][RTW89_KCC][15] = 76, + [0][0][2][0][RTW89_ACMA][15] = 58, + [0][0][2][0][RTW89_CHILE][15] = 54, + [0][0][2][0][RTW89_UKRAINE][15] = 52, + [0][0][2][0][RTW89_MEXICO][15] = 74, + [0][0][2][0][RTW89_CN][15] = 127, + [0][0][2][0][RTW89_QATAR][15] = 52, + [0][0][2][0][RTW89_FCC][17] = 76, + [0][0][2][0][RTW89_ETSI][17] = 58, + [0][0][2][0][RTW89_MKK][17] = 76, + 
[0][0][2][0][RTW89_IC][17] = 76, + [0][0][2][0][RTW89_KCC][17] = 76, + [0][0][2][0][RTW89_ACMA][17] = 58, + [0][0][2][0][RTW89_CHILE][17] = 54, + [0][0][2][0][RTW89_UKRAINE][17] = 52, + [0][0][2][0][RTW89_MEXICO][17] = 76, + [0][0][2][0][RTW89_CN][17] = 127, + [0][0][2][0][RTW89_QATAR][17] = 52, + [0][0][2][0][RTW89_FCC][19] = 76, + [0][0][2][0][RTW89_ETSI][19] = 58, + [0][0][2][0][RTW89_MKK][19] = 76, + [0][0][2][0][RTW89_IC][19] = 76, + [0][0][2][0][RTW89_KCC][19] = 76, + [0][0][2][0][RTW89_ACMA][19] = 58, + [0][0][2][0][RTW89_CHILE][19] = 54, + [0][0][2][0][RTW89_UKRAINE][19] = 52, + [0][0][2][0][RTW89_MEXICO][19] = 76, + [0][0][2][0][RTW89_CN][19] = 127, + [0][0][2][0][RTW89_QATAR][19] = 52, + [0][0][2][0][RTW89_FCC][21] = 76, + [0][0][2][0][RTW89_ETSI][21] = 58, + [0][0][2][0][RTW89_MKK][21] = 76, + [0][0][2][0][RTW89_IC][21] = 76, + [0][0][2][0][RTW89_KCC][21] = 76, + [0][0][2][0][RTW89_ACMA][21] = 58, + [0][0][2][0][RTW89_CHILE][21] = 54, + [0][0][2][0][RTW89_UKRAINE][21] = 52, + [0][0][2][0][RTW89_MEXICO][21] = 76, + [0][0][2][0][RTW89_CN][21] = 127, + [0][0][2][0][RTW89_QATAR][21] = 52, + [0][0][2][0][RTW89_FCC][23] = 76, + [0][0][2][0][RTW89_ETSI][23] = 58, + [0][0][2][0][RTW89_MKK][23] = 76, + [0][0][2][0][RTW89_IC][23] = 76, + [0][0][2][0][RTW89_KCC][23] = 76, + [0][0][2][0][RTW89_ACMA][23] = 58, + [0][0][2][0][RTW89_CHILE][23] = 54, + [0][0][2][0][RTW89_UKRAINE][23] = 52, + [0][0][2][0][RTW89_MEXICO][23] = 76, + [0][0][2][0][RTW89_CN][23] = 127, + [0][0][2][0][RTW89_QATAR][23] = 52, + [0][0][2][0][RTW89_FCC][25] = 76, + [0][0][2][0][RTW89_ETSI][25] = 58, + [0][0][2][0][RTW89_MKK][25] = 76, + [0][0][2][0][RTW89_IC][25] = 127, + [0][0][2][0][RTW89_KCC][25] = 76, + [0][0][2][0][RTW89_ACMA][25] = 127, + [0][0][2][0][RTW89_CHILE][25] = 54, + [0][0][2][0][RTW89_UKRAINE][25] = 52, + [0][0][2][0][RTW89_MEXICO][25] = 76, + [0][0][2][0][RTW89_CN][25] = 127, + [0][0][2][0][RTW89_QATAR][25] = 52, + [0][0][2][0][RTW89_FCC][27] = 76, + [0][0][2][0][RTW89_ETSI][27] = 58, + [0][0][2][0][RTW89_MKK][27] = 76, + [0][0][2][0][RTW89_IC][27] = 127, + [0][0][2][0][RTW89_KCC][27] = 76, + [0][0][2][0][RTW89_ACMA][27] = 127, + [0][0][2][0][RTW89_CHILE][27] = 54, + [0][0][2][0][RTW89_UKRAINE][27] = 52, + [0][0][2][0][RTW89_MEXICO][27] = 76, + [0][0][2][0][RTW89_CN][27] = 127, + [0][0][2][0][RTW89_QATAR][27] = 52, + [0][0][2][0][RTW89_FCC][29] = 76, + [0][0][2][0][RTW89_ETSI][29] = 58, + [0][0][2][0][RTW89_MKK][29] = 76, + [0][0][2][0][RTW89_IC][29] = 127, + [0][0][2][0][RTW89_KCC][29] = 76, + [0][0][2][0][RTW89_ACMA][29] = 127, + [0][0][2][0][RTW89_CHILE][29] = 54, + [0][0][2][0][RTW89_UKRAINE][29] = 52, + [0][0][2][0][RTW89_MEXICO][29] = 76, + [0][0][2][0][RTW89_CN][29] = 127, + [0][0][2][0][RTW89_QATAR][29] = 52, + [0][0][2][0][RTW89_FCC][31] = 76, + [0][0][2][0][RTW89_ETSI][31] = 58, + [0][0][2][0][RTW89_MKK][31] = 76, + [0][0][2][0][RTW89_IC][31] = 76, + [0][0][2][0][RTW89_KCC][31] = 76, + [0][0][2][0][RTW89_ACMA][31] = 58, + [0][0][2][0][RTW89_CHILE][31] = 54, + [0][0][2][0][RTW89_UKRAINE][31] = 52, + [0][0][2][0][RTW89_MEXICO][31] = 76, + [0][0][2][0][RTW89_CN][31] = 127, + [0][0][2][0][RTW89_QATAR][31] = 52, + [0][0][2][0][RTW89_FCC][33] = 76, + [0][0][2][0][RTW89_ETSI][33] = 58, + [0][0][2][0][RTW89_MKK][33] = 76, + [0][0][2][0][RTW89_IC][33] = 76, + [0][0][2][0][RTW89_KCC][33] = 76, + [0][0][2][0][RTW89_ACMA][33] = 58, + [0][0][2][0][RTW89_CHILE][33] = 54, + [0][0][2][0][RTW89_UKRAINE][33] = 52, + [0][0][2][0][RTW89_MEXICO][33] = 76, + [0][0][2][0][RTW89_CN][33] = 127, + 
[0][0][2][0][RTW89_QATAR][33] = 52, + [0][0][2][0][RTW89_FCC][35] = 70, + [0][0][2][0][RTW89_ETSI][35] = 58, + [0][0][2][0][RTW89_MKK][35] = 76, + [0][0][2][0][RTW89_IC][35] = 70, + [0][0][2][0][RTW89_KCC][35] = 76, + [0][0][2][0][RTW89_ACMA][35] = 58, + [0][0][2][0][RTW89_CHILE][35] = 54, + [0][0][2][0][RTW89_UKRAINE][35] = 52, + [0][0][2][0][RTW89_MEXICO][35] = 70, + [0][0][2][0][RTW89_CN][35] = 127, + [0][0][2][0][RTW89_QATAR][35] = 52, + [0][0][2][0][RTW89_FCC][37] = 76, + [0][0][2][0][RTW89_ETSI][37] = 127, + [0][0][2][0][RTW89_MKK][37] = 76, + [0][0][2][0][RTW89_IC][37] = 76, + [0][0][2][0][RTW89_KCC][37] = 76, + [0][0][2][0][RTW89_ACMA][37] = 76, + [0][0][2][0][RTW89_CHILE][37] = 54, + [0][0][2][0][RTW89_UKRAINE][37] = 127, + [0][0][2][0][RTW89_MEXICO][37] = 76, + [0][0][2][0][RTW89_CN][37] = 127, + [0][0][2][0][RTW89_QATAR][37] = 127, + [0][0][2][0][RTW89_FCC][38] = 76, + [0][0][2][0][RTW89_ETSI][38] = 28, + [0][0][2][0][RTW89_MKK][38] = 127, + [0][0][2][0][RTW89_IC][38] = 76, + [0][0][2][0][RTW89_KCC][38] = 76, + [0][0][2][0][RTW89_ACMA][38] = 76, + [0][0][2][0][RTW89_CHILE][38] = 54, + [0][0][2][0][RTW89_UKRAINE][38] = 28, + [0][0][2][0][RTW89_MEXICO][38] = 76, + [0][0][2][0][RTW89_CN][38] = 68, + [0][0][2][0][RTW89_QATAR][38] = 28, + [0][0][2][0][RTW89_FCC][40] = 76, + [0][0][2][0][RTW89_ETSI][40] = 28, + [0][0][2][0][RTW89_MKK][40] = 127, + [0][0][2][0][RTW89_IC][40] = 76, + [0][0][2][0][RTW89_KCC][40] = 76, + [0][0][2][0][RTW89_ACMA][40] = 76, + [0][0][2][0][RTW89_CHILE][40] = 54, + [0][0][2][0][RTW89_UKRAINE][40] = 28, + [0][0][2][0][RTW89_MEXICO][40] = 76, + [0][0][2][0][RTW89_CN][40] = 76, + [0][0][2][0][RTW89_QATAR][40] = 28, + [0][0][2][0][RTW89_FCC][42] = 76, + [0][0][2][0][RTW89_ETSI][42] = 28, + [0][0][2][0][RTW89_MKK][42] = 127, + [0][0][2][0][RTW89_IC][42] = 76, + [0][0][2][0][RTW89_KCC][42] = 76, + [0][0][2][0][RTW89_ACMA][42] = 76, + [0][0][2][0][RTW89_CHILE][42] = 54, + [0][0][2][0][RTW89_UKRAINE][42] = 28, + [0][0][2][0][RTW89_MEXICO][42] = 76, + [0][0][2][0][RTW89_CN][42] = 76, + [0][0][2][0][RTW89_QATAR][42] = 28, + [0][0][2][0][RTW89_FCC][44] = 76, + [0][0][2][0][RTW89_ETSI][44] = 28, + [0][0][2][0][RTW89_MKK][44] = 127, + [0][0][2][0][RTW89_IC][44] = 76, + [0][0][2][0][RTW89_KCC][44] = 76, + [0][0][2][0][RTW89_ACMA][44] = 76, + [0][0][2][0][RTW89_CHILE][44] = 54, + [0][0][2][0][RTW89_UKRAINE][44] = 28, + [0][0][2][0][RTW89_MEXICO][44] = 76, + [0][0][2][0][RTW89_CN][44] = 76, + [0][0][2][0][RTW89_QATAR][44] = 28, + [0][0][2][0][RTW89_FCC][46] = 76, + [0][0][2][0][RTW89_ETSI][46] = 28, + [0][0][2][0][RTW89_MKK][46] = 127, + [0][0][2][0][RTW89_IC][46] = 76, + [0][0][2][0][RTW89_KCC][46] = 76, + [0][0][2][0][RTW89_ACMA][46] = 76, + [0][0][2][0][RTW89_CHILE][46] = 54, + [0][0][2][0][RTW89_UKRAINE][46] = 28, + [0][0][2][0][RTW89_MEXICO][46] = 76, + [0][0][2][0][RTW89_CN][46] = 76, + [0][0][2][0][RTW89_QATAR][46] = 28, + [0][1][2][0][RTW89_FCC][0] = 68, + [0][1][2][0][RTW89_ETSI][0] = 46, + [0][1][2][0][RTW89_MKK][0] = 50, + [0][1][2][0][RTW89_IC][0] = 40, + [0][1][2][0][RTW89_KCC][0] = 68, + [0][1][2][0][RTW89_ACMA][0] = 46, + [0][1][2][0][RTW89_CHILE][0] = 18, + [0][1][2][0][RTW89_UKRAINE][0] = 40, + [0][1][2][0][RTW89_MEXICO][0] = 50, + [0][1][2][0][RTW89_CN][0] = 46, + [0][1][2][0][RTW89_QATAR][0] = 46, + [0][1][2][0][RTW89_FCC][2] = 68, + [0][1][2][0][RTW89_ETSI][2] = 46, + [0][1][2][0][RTW89_MKK][2] = 50, + [0][1][2][0][RTW89_IC][2] = 40, + [0][1][2][0][RTW89_KCC][2] = 68, + [0][1][2][0][RTW89_ACMA][2] = 46, + [0][1][2][0][RTW89_CHILE][2] = 18, + 
[0][1][2][0][RTW89_UKRAINE][2] = 40, + [0][1][2][0][RTW89_MEXICO][2] = 50, + [0][1][2][0][RTW89_CN][2] = 46, + [0][1][2][0][RTW89_QATAR][2] = 46, + [0][1][2][0][RTW89_FCC][4] = 68, + [0][1][2][0][RTW89_ETSI][4] = 46, + [0][1][2][0][RTW89_MKK][4] = 50, + [0][1][2][0][RTW89_IC][4] = 40, + [0][1][2][0][RTW89_KCC][4] = 68, + [0][1][2][0][RTW89_ACMA][4] = 46, + [0][1][2][0][RTW89_CHILE][4] = 18, + [0][1][2][0][RTW89_UKRAINE][4] = 40, + [0][1][2][0][RTW89_MEXICO][4] = 50, + [0][1][2][0][RTW89_CN][4] = 46, + [0][1][2][0][RTW89_QATAR][4] = 46, + [0][1][2][0][RTW89_FCC][6] = 68, + [0][1][2][0][RTW89_ETSI][6] = 46, + [0][1][2][0][RTW89_MKK][6] = 50, + [0][1][2][0][RTW89_IC][6] = 40, + [0][1][2][0][RTW89_KCC][6] = 38, + [0][1][2][0][RTW89_ACMA][6] = 46, + [0][1][2][0][RTW89_CHILE][6] = 18, + [0][1][2][0][RTW89_UKRAINE][6] = 40, + [0][1][2][0][RTW89_MEXICO][6] = 50, + [0][1][2][0][RTW89_CN][6] = 46, + [0][1][2][0][RTW89_QATAR][6] = 46, + [0][1][2][0][RTW89_FCC][8] = 68, + [0][1][2][0][RTW89_ETSI][8] = 46, + [0][1][2][0][RTW89_MKK][8] = 50, + [0][1][2][0][RTW89_IC][8] = 52, + [0][1][2][0][RTW89_KCC][8] = 68, + [0][1][2][0][RTW89_ACMA][8] = 46, + [0][1][2][0][RTW89_CHILE][8] = 42, + [0][1][2][0][RTW89_UKRAINE][8] = 40, + [0][1][2][0][RTW89_MEXICO][8] = 68, + [0][1][2][0][RTW89_CN][8] = 46, + [0][1][2][0][RTW89_QATAR][8] = 46, + [0][1][2][0][RTW89_FCC][10] = 68, + [0][1][2][0][RTW89_ETSI][10] = 46, + [0][1][2][0][RTW89_MKK][10] = 50, + [0][1][2][0][RTW89_IC][10] = 52, + [0][1][2][0][RTW89_KCC][10] = 68, + [0][1][2][0][RTW89_ACMA][10] = 46, + [0][1][2][0][RTW89_CHILE][10] = 42, + [0][1][2][0][RTW89_UKRAINE][10] = 40, + [0][1][2][0][RTW89_MEXICO][10] = 68, + [0][1][2][0][RTW89_CN][10] = 46, + [0][1][2][0][RTW89_QATAR][10] = 46, + [0][1][2][0][RTW89_FCC][12] = 68, + [0][1][2][0][RTW89_ETSI][12] = 46, + [0][1][2][0][RTW89_MKK][12] = 50, + [0][1][2][0][RTW89_IC][12] = 52, + [0][1][2][0][RTW89_KCC][12] = 68, + [0][1][2][0][RTW89_ACMA][12] = 46, + [0][1][2][0][RTW89_CHILE][12] = 42, + [0][1][2][0][RTW89_UKRAINE][12] = 40, + [0][1][2][0][RTW89_MEXICO][12] = 68, + [0][1][2][0][RTW89_CN][12] = 46, + [0][1][2][0][RTW89_QATAR][12] = 46, + [0][1][2][0][RTW89_FCC][14] = 68, + [0][1][2][0][RTW89_ETSI][14] = 46, + [0][1][2][0][RTW89_MKK][14] = 50, + [0][1][2][0][RTW89_IC][14] = 52, + [0][1][2][0][RTW89_KCC][14] = 68, + [0][1][2][0][RTW89_ACMA][14] = 46, + [0][1][2][0][RTW89_CHILE][14] = 42, + [0][1][2][0][RTW89_UKRAINE][14] = 40, + [0][1][2][0][RTW89_MEXICO][14] = 68, + [0][1][2][0][RTW89_CN][14] = 46, + [0][1][2][0][RTW89_QATAR][14] = 46, + [0][1][2][0][RTW89_FCC][15] = 68, + [0][1][2][0][RTW89_ETSI][15] = 46, + [0][1][2][0][RTW89_MKK][15] = 70, + [0][1][2][0][RTW89_IC][15] = 68, + [0][1][2][0][RTW89_KCC][15] = 66, + [0][1][2][0][RTW89_ACMA][15] = 46, + [0][1][2][0][RTW89_CHILE][15] = 42, + [0][1][2][0][RTW89_UKRAINE][15] = 40, + [0][1][2][0][RTW89_MEXICO][15] = 68, + [0][1][2][0][RTW89_CN][15] = 127, + [0][1][2][0][RTW89_QATAR][15] = 40, + [0][1][2][0][RTW89_FCC][17] = 68, + [0][1][2][0][RTW89_ETSI][17] = 46, + [0][1][2][0][RTW89_MKK][17] = 70, + [0][1][2][0][RTW89_IC][17] = 68, + [0][1][2][0][RTW89_KCC][17] = 66, + [0][1][2][0][RTW89_ACMA][17] = 46, + [0][1][2][0][RTW89_CHILE][17] = 42, + [0][1][2][0][RTW89_UKRAINE][17] = 40, + [0][1][2][0][RTW89_MEXICO][17] = 68, + [0][1][2][0][RTW89_CN][17] = 127, + [0][1][2][0][RTW89_QATAR][17] = 40, + [0][1][2][0][RTW89_FCC][19] = 68, + [0][1][2][0][RTW89_ETSI][19] = 46, + [0][1][2][0][RTW89_MKK][19] = 70, + [0][1][2][0][RTW89_IC][19] = 68, + [0][1][2][0][RTW89_KCC][19] = 66, + 
[0][1][2][0][RTW89_ACMA][19] = 46, + [0][1][2][0][RTW89_CHILE][19] = 42, + [0][1][2][0][RTW89_UKRAINE][19] = 40, + [0][1][2][0][RTW89_MEXICO][19] = 68, + [0][1][2][0][RTW89_CN][19] = 127, + [0][1][2][0][RTW89_QATAR][19] = 40, + [0][1][2][0][RTW89_FCC][21] = 68, + [0][1][2][0][RTW89_ETSI][21] = 46, + [0][1][2][0][RTW89_MKK][21] = 70, + [0][1][2][0][RTW89_IC][21] = 68, + [0][1][2][0][RTW89_KCC][21] = 66, + [0][1][2][0][RTW89_ACMA][21] = 46, + [0][1][2][0][RTW89_CHILE][21] = 42, + [0][1][2][0][RTW89_UKRAINE][21] = 40, + [0][1][2][0][RTW89_MEXICO][21] = 68, + [0][1][2][0][RTW89_CN][21] = 127, + [0][1][2][0][RTW89_QATAR][21] = 40, + [0][1][2][0][RTW89_FCC][23] = 68, + [0][1][2][0][RTW89_ETSI][23] = 46, + [0][1][2][0][RTW89_MKK][23] = 70, + [0][1][2][0][RTW89_IC][23] = 68, + [0][1][2][0][RTW89_KCC][23] = 66, + [0][1][2][0][RTW89_ACMA][23] = 46, + [0][1][2][0][RTW89_CHILE][23] = 42, + [0][1][2][0][RTW89_UKRAINE][23] = 40, + [0][1][2][0][RTW89_MEXICO][23] = 68, + [0][1][2][0][RTW89_CN][23] = 127, + [0][1][2][0][RTW89_QATAR][23] = 40, + [0][1][2][0][RTW89_FCC][25] = 68, + [0][1][2][0][RTW89_ETSI][25] = 46, + [0][1][2][0][RTW89_MKK][25] = 70, + [0][1][2][0][RTW89_IC][25] = 127, + [0][1][2][0][RTW89_KCC][25] = 66, + [0][1][2][0][RTW89_ACMA][25] = 127, + [0][1][2][0][RTW89_CHILE][25] = 42, + [0][1][2][0][RTW89_UKRAINE][25] = 40, + [0][1][2][0][RTW89_MEXICO][25] = 68, + [0][1][2][0][RTW89_CN][25] = 127, + [0][1][2][0][RTW89_QATAR][25] = 40, + [0][1][2][0][RTW89_FCC][27] = 68, + [0][1][2][0][RTW89_ETSI][27] = 46, + [0][1][2][0][RTW89_MKK][27] = 70, + [0][1][2][0][RTW89_IC][27] = 127, + [0][1][2][0][RTW89_KCC][27] = 66, + [0][1][2][0][RTW89_ACMA][27] = 127, + [0][1][2][0][RTW89_CHILE][27] = 42, + [0][1][2][0][RTW89_UKRAINE][27] = 40, + [0][1][2][0][RTW89_MEXICO][27] = 68, + [0][1][2][0][RTW89_CN][27] = 127, + [0][1][2][0][RTW89_QATAR][27] = 40, + [0][1][2][0][RTW89_FCC][29] = 68, + [0][1][2][0][RTW89_ETSI][29] = 46, + [0][1][2][0][RTW89_MKK][29] = 70, + [0][1][2][0][RTW89_IC][29] = 127, + [0][1][2][0][RTW89_KCC][29] = 66, + [0][1][2][0][RTW89_ACMA][29] = 127, + [0][1][2][0][RTW89_CHILE][29] = 42, + [0][1][2][0][RTW89_UKRAINE][29] = 40, + [0][1][2][0][RTW89_MEXICO][29] = 68, + [0][1][2][0][RTW89_CN][29] = 127, + [0][1][2][0][RTW89_QATAR][29] = 40, + [0][1][2][0][RTW89_FCC][31] = 68, + [0][1][2][0][RTW89_ETSI][31] = 46, + [0][1][2][0][RTW89_MKK][31] = 70, + [0][1][2][0][RTW89_IC][31] = 68, + [0][1][2][0][RTW89_KCC][31] = 66, + [0][1][2][0][RTW89_ACMA][31] = 46, + [0][1][2][0][RTW89_CHILE][31] = 42, + [0][1][2][0][RTW89_UKRAINE][31] = 40, + [0][1][2][0][RTW89_MEXICO][31] = 68, + [0][1][2][0][RTW89_CN][31] = 127, + [0][1][2][0][RTW89_QATAR][31] = 40, + [0][1][2][0][RTW89_FCC][33] = 68, + [0][1][2][0][RTW89_ETSI][33] = 46, + [0][1][2][0][RTW89_MKK][33] = 70, + [0][1][2][0][RTW89_IC][33] = 68, + [0][1][2][0][RTW89_KCC][33] = 66, + [0][1][2][0][RTW89_ACMA][33] = 46, + [0][1][2][0][RTW89_CHILE][33] = 42, + [0][1][2][0][RTW89_UKRAINE][33] = 40, + [0][1][2][0][RTW89_MEXICO][33] = 68, + [0][1][2][0][RTW89_CN][33] = 127, + [0][1][2][0][RTW89_QATAR][33] = 40, + [0][1][2][0][RTW89_FCC][35] = 64, + [0][1][2][0][RTW89_ETSI][35] = 46, + [0][1][2][0][RTW89_MKK][35] = 70, + [0][1][2][0][RTW89_IC][35] = 64, + [0][1][2][0][RTW89_KCC][35] = 66, + [0][1][2][0][RTW89_ACMA][35] = 46, + [0][1][2][0][RTW89_CHILE][35] = 42, + [0][1][2][0][RTW89_UKRAINE][35] = 40, + [0][1][2][0][RTW89_MEXICO][35] = 64, + [0][1][2][0][RTW89_CN][35] = 127, + [0][1][2][0][RTW89_QATAR][35] = 40, + [0][1][2][0][RTW89_FCC][37] = 68, + 
[0][1][2][0][RTW89_ETSI][37] = 127, + [0][1][2][0][RTW89_MKK][37] = 70, + [0][1][2][0][RTW89_IC][37] = 68, + [0][1][2][0][RTW89_KCC][37] = 66, + [0][1][2][0][RTW89_ACMA][37] = 68, + [0][1][2][0][RTW89_CHILE][37] = 42, + [0][1][2][0][RTW89_UKRAINE][37] = 127, + [0][1][2][0][RTW89_MEXICO][37] = 68, + [0][1][2][0][RTW89_CN][37] = 127, + [0][1][2][0][RTW89_QATAR][37] = 127, + [0][1][2][0][RTW89_FCC][38] = 76, + [0][1][2][0][RTW89_ETSI][38] = 16, + [0][1][2][0][RTW89_MKK][38] = 127, + [0][1][2][0][RTW89_IC][38] = 76, + [0][1][2][0][RTW89_KCC][38] = 66, + [0][1][2][0][RTW89_ACMA][38] = 76, + [0][1][2][0][RTW89_CHILE][38] = 42, + [0][1][2][0][RTW89_UKRAINE][38] = 16, + [0][1][2][0][RTW89_MEXICO][38] = 76, + [0][1][2][0][RTW89_CN][38] = 68, + [0][1][2][0][RTW89_QATAR][38] = 16, + [0][1][2][0][RTW89_FCC][40] = 76, + [0][1][2][0][RTW89_ETSI][40] = 16, + [0][1][2][0][RTW89_MKK][40] = 127, + [0][1][2][0][RTW89_IC][40] = 76, + [0][1][2][0][RTW89_KCC][40] = 66, + [0][1][2][0][RTW89_ACMA][40] = 76, + [0][1][2][0][RTW89_CHILE][40] = 42, + [0][1][2][0][RTW89_UKRAINE][40] = 16, + [0][1][2][0][RTW89_MEXICO][40] = 76, + [0][1][2][0][RTW89_CN][40] = 76, + [0][1][2][0][RTW89_QATAR][40] = 16, + [0][1][2][0][RTW89_FCC][42] = 76, + [0][1][2][0][RTW89_ETSI][42] = 16, + [0][1][2][0][RTW89_MKK][42] = 127, + [0][1][2][0][RTW89_IC][42] = 76, + [0][1][2][0][RTW89_KCC][42] = 66, + [0][1][2][0][RTW89_ACMA][42] = 76, + [0][1][2][0][RTW89_CHILE][42] = 42, + [0][1][2][0][RTW89_UKRAINE][42] = 16, + [0][1][2][0][RTW89_MEXICO][42] = 76, + [0][1][2][0][RTW89_CN][42] = 76, + [0][1][2][0][RTW89_QATAR][42] = 16, + [0][1][2][0][RTW89_FCC][44] = 76, + [0][1][2][0][RTW89_ETSI][44] = 16, + [0][1][2][0][RTW89_MKK][44] = 127, + [0][1][2][0][RTW89_IC][44] = 76, + [0][1][2][0][RTW89_KCC][44] = 66, + [0][1][2][0][RTW89_ACMA][44] = 76, + [0][1][2][0][RTW89_CHILE][44] = 42, + [0][1][2][0][RTW89_UKRAINE][44] = 16, + [0][1][2][0][RTW89_MEXICO][44] = 76, + [0][1][2][0][RTW89_CN][44] = 76, + [0][1][2][0][RTW89_QATAR][44] = 16, + [0][1][2][0][RTW89_FCC][46] = 76, + [0][1][2][0][RTW89_ETSI][46] = 16, + [0][1][2][0][RTW89_MKK][46] = 127, + [0][1][2][0][RTW89_IC][46] = 76, + [0][1][2][0][RTW89_KCC][46] = 66, + [0][1][2][0][RTW89_ACMA][46] = 76, + [0][1][2][0][RTW89_CHILE][46] = 42, + [0][1][2][0][RTW89_UKRAINE][46] = 16, + [0][1][2][0][RTW89_MEXICO][46] = 76, + [0][1][2][0][RTW89_CN][46] = 76, + [0][1][2][0][RTW89_QATAR][46] = 16, + [0][1][2][1][RTW89_FCC][0] = 68, + [0][1][2][1][RTW89_ETSI][0] = 34, + [0][1][2][1][RTW89_MKK][0] = 50, + [0][1][2][1][RTW89_IC][0] = 38, + [0][1][2][1][RTW89_KCC][0] = 68, + [0][1][2][1][RTW89_ACMA][0] = 34, + [0][1][2][1][RTW89_CHILE][0] = 6, + [0][1][2][1][RTW89_UKRAINE][0] = 28, + [0][1][2][1][RTW89_MEXICO][0] = 50, + [0][1][2][1][RTW89_CN][0] = 34, + [0][1][2][1][RTW89_QATAR][0] = 34, + [0][1][2][1][RTW89_FCC][2] = 68, + [0][1][2][1][RTW89_ETSI][2] = 34, + [0][1][2][1][RTW89_MKK][2] = 50, + [0][1][2][1][RTW89_IC][2] = 38, + [0][1][2][1][RTW89_KCC][2] = 68, + [0][1][2][1][RTW89_ACMA][2] = 34, + [0][1][2][1][RTW89_CHILE][2] = 6, + [0][1][2][1][RTW89_UKRAINE][2] = 28, + [0][1][2][1][RTW89_MEXICO][2] = 50, + [0][1][2][1][RTW89_CN][2] = 34, + [0][1][2][1][RTW89_QATAR][2] = 34, + [0][1][2][1][RTW89_FCC][4] = 68, + [0][1][2][1][RTW89_ETSI][4] = 34, + [0][1][2][1][RTW89_MKK][4] = 50, + [0][1][2][1][RTW89_IC][4] = 38, + [0][1][2][1][RTW89_KCC][4] = 68, + [0][1][2][1][RTW89_ACMA][4] = 34, + [0][1][2][1][RTW89_CHILE][4] = 6, + [0][1][2][1][RTW89_UKRAINE][4] = 28, + [0][1][2][1][RTW89_MEXICO][4] = 50, + [0][1][2][1][RTW89_CN][4] 
= 34, + [0][1][2][1][RTW89_QATAR][4] = 34, + [0][1][2][1][RTW89_FCC][6] = 68, + [0][1][2][1][RTW89_ETSI][6] = 34, + [0][1][2][1][RTW89_MKK][6] = 50, + [0][1][2][1][RTW89_IC][6] = 38, + [0][1][2][1][RTW89_KCC][6] = 38, + [0][1][2][1][RTW89_ACMA][6] = 34, + [0][1][2][1][RTW89_CHILE][6] = 6, + [0][1][2][1][RTW89_UKRAINE][6] = 28, + [0][1][2][1][RTW89_MEXICO][6] = 50, + [0][1][2][1][RTW89_CN][6] = 34, + [0][1][2][1][RTW89_QATAR][6] = 34, + [0][1][2][1][RTW89_FCC][8] = 68, + [0][1][2][1][RTW89_ETSI][8] = 34, + [0][1][2][1][RTW89_MKK][8] = 50, + [0][1][2][1][RTW89_IC][8] = 38, + [0][1][2][1][RTW89_KCC][8] = 68, + [0][1][2][1][RTW89_ACMA][8] = 34, + [0][1][2][1][RTW89_CHILE][8] = 30, + [0][1][2][1][RTW89_UKRAINE][8] = 28, + [0][1][2][1][RTW89_MEXICO][8] = 68, + [0][1][2][1][RTW89_CN][8] = 34, + [0][1][2][1][RTW89_QATAR][8] = 34, + [0][1][2][1][RTW89_FCC][10] = 68, + [0][1][2][1][RTW89_ETSI][10] = 34, + [0][1][2][1][RTW89_MKK][10] = 50, + [0][1][2][1][RTW89_IC][10] = 38, + [0][1][2][1][RTW89_KCC][10] = 68, + [0][1][2][1][RTW89_ACMA][10] = 34, + [0][1][2][1][RTW89_CHILE][10] = 30, + [0][1][2][1][RTW89_UKRAINE][10] = 28, + [0][1][2][1][RTW89_MEXICO][10] = 68, + [0][1][2][1][RTW89_CN][10] = 34, + [0][1][2][1][RTW89_QATAR][10] = 34, + [0][1][2][1][RTW89_FCC][12] = 68, + [0][1][2][1][RTW89_ETSI][12] = 34, + [0][1][2][1][RTW89_MKK][12] = 50, + [0][1][2][1][RTW89_IC][12] = 38, + [0][1][2][1][RTW89_KCC][12] = 68, + [0][1][2][1][RTW89_ACMA][12] = 34, + [0][1][2][1][RTW89_CHILE][12] = 30, + [0][1][2][1][RTW89_UKRAINE][12] = 28, + [0][1][2][1][RTW89_MEXICO][12] = 68, + [0][1][2][1][RTW89_CN][12] = 34, + [0][1][2][1][RTW89_QATAR][12] = 34, + [0][1][2][1][RTW89_FCC][14] = 68, + [0][1][2][1][RTW89_ETSI][14] = 34, + [0][1][2][1][RTW89_MKK][14] = 50, + [0][1][2][1][RTW89_IC][14] = 38, + [0][1][2][1][RTW89_KCC][14] = 68, + [0][1][2][1][RTW89_ACMA][14] = 34, + [0][1][2][1][RTW89_CHILE][14] = 30, + [0][1][2][1][RTW89_UKRAINE][14] = 28, + [0][1][2][1][RTW89_MEXICO][14] = 68, + [0][1][2][1][RTW89_CN][14] = 34, + [0][1][2][1][RTW89_QATAR][14] = 34, + [0][1][2][1][RTW89_FCC][15] = 68, + [0][1][2][1][RTW89_ETSI][15] = 34, + [0][1][2][1][RTW89_MKK][15] = 70, + [0][1][2][1][RTW89_IC][15] = 62, + [0][1][2][1][RTW89_KCC][15] = 66, + [0][1][2][1][RTW89_ACMA][15] = 34, + [0][1][2][1][RTW89_CHILE][15] = 30, + [0][1][2][1][RTW89_UKRAINE][15] = 28, + [0][1][2][1][RTW89_MEXICO][15] = 68, + [0][1][2][1][RTW89_CN][15] = 127, + [0][1][2][1][RTW89_QATAR][15] = 28, + [0][1][2][1][RTW89_FCC][17] = 68, + [0][1][2][1][RTW89_ETSI][17] = 34, + [0][1][2][1][RTW89_MKK][17] = 70, + [0][1][2][1][RTW89_IC][17] = 62, + [0][1][2][1][RTW89_KCC][17] = 66, + [0][1][2][1][RTW89_ACMA][17] = 34, + [0][1][2][1][RTW89_CHILE][17] = 30, + [0][1][2][1][RTW89_UKRAINE][17] = 28, + [0][1][2][1][RTW89_MEXICO][17] = 68, + [0][1][2][1][RTW89_CN][17] = 127, + [0][1][2][1][RTW89_QATAR][17] = 28, + [0][1][2][1][RTW89_FCC][19] = 68, + [0][1][2][1][RTW89_ETSI][19] = 34, + [0][1][2][1][RTW89_MKK][19] = 70, + [0][1][2][1][RTW89_IC][19] = 62, + [0][1][2][1][RTW89_KCC][19] = 66, + [0][1][2][1][RTW89_ACMA][19] = 34, + [0][1][2][1][RTW89_CHILE][19] = 30, + [0][1][2][1][RTW89_UKRAINE][19] = 28, + [0][1][2][1][RTW89_MEXICO][19] = 68, + [0][1][2][1][RTW89_CN][19] = 127, + [0][1][2][1][RTW89_QATAR][19] = 28, + [0][1][2][1][RTW89_FCC][21] = 68, + [0][1][2][1][RTW89_ETSI][21] = 34, + [0][1][2][1][RTW89_MKK][21] = 70, + [0][1][2][1][RTW89_IC][21] = 62, + [0][1][2][1][RTW89_KCC][21] = 66, + [0][1][2][1][RTW89_ACMA][21] = 34, + [0][1][2][1][RTW89_CHILE][21] = 30, + 
[0][1][2][1][RTW89_UKRAINE][21] = 28, + [0][1][2][1][RTW89_MEXICO][21] = 68, + [0][1][2][1][RTW89_CN][21] = 127, + [0][1][2][1][RTW89_QATAR][21] = 28, + [0][1][2][1][RTW89_FCC][23] = 68, + [0][1][2][1][RTW89_ETSI][23] = 34, + [0][1][2][1][RTW89_MKK][23] = 70, + [0][1][2][1][RTW89_IC][23] = 62, + [0][1][2][1][RTW89_KCC][23] = 66, + [0][1][2][1][RTW89_ACMA][23] = 34, + [0][1][2][1][RTW89_CHILE][23] = 30, + [0][1][2][1][RTW89_UKRAINE][23] = 28, + [0][1][2][1][RTW89_MEXICO][23] = 68, + [0][1][2][1][RTW89_CN][23] = 127, + [0][1][2][1][RTW89_QATAR][23] = 28, + [0][1][2][1][RTW89_FCC][25] = 68, + [0][1][2][1][RTW89_ETSI][25] = 34, + [0][1][2][1][RTW89_MKK][25] = 70, + [0][1][2][1][RTW89_IC][25] = 127, + [0][1][2][1][RTW89_KCC][25] = 66, + [0][1][2][1][RTW89_ACMA][25] = 127, + [0][1][2][1][RTW89_CHILE][25] = 30, + [0][1][2][1][RTW89_UKRAINE][25] = 28, + [0][1][2][1][RTW89_MEXICO][25] = 68, + [0][1][2][1][RTW89_CN][25] = 127, + [0][1][2][1][RTW89_QATAR][25] = 28, + [0][1][2][1][RTW89_FCC][27] = 68, + [0][1][2][1][RTW89_ETSI][27] = 34, + [0][1][2][1][RTW89_MKK][27] = 70, + [0][1][2][1][RTW89_IC][27] = 127, + [0][1][2][1][RTW89_KCC][27] = 66, + [0][1][2][1][RTW89_ACMA][27] = 127, + [0][1][2][1][RTW89_CHILE][27] = 30, + [0][1][2][1][RTW89_UKRAINE][27] = 28, + [0][1][2][1][RTW89_MEXICO][27] = 68, + [0][1][2][1][RTW89_CN][27] = 127, + [0][1][2][1][RTW89_QATAR][27] = 28, + [0][1][2][1][RTW89_FCC][29] = 68, + [0][1][2][1][RTW89_ETSI][29] = 34, + [0][1][2][1][RTW89_MKK][29] = 70, + [0][1][2][1][RTW89_IC][29] = 127, + [0][1][2][1][RTW89_KCC][29] = 66, + [0][1][2][1][RTW89_ACMA][29] = 127, + [0][1][2][1][RTW89_CHILE][29] = 30, + [0][1][2][1][RTW89_UKRAINE][29] = 28, + [0][1][2][1][RTW89_MEXICO][29] = 68, + [0][1][2][1][RTW89_CN][29] = 127, + [0][1][2][1][RTW89_QATAR][29] = 28, + [0][1][2][1][RTW89_FCC][31] = 68, + [0][1][2][1][RTW89_ETSI][31] = 34, + [0][1][2][1][RTW89_MKK][31] = 70, + [0][1][2][1][RTW89_IC][31] = 62, + [0][1][2][1][RTW89_KCC][31] = 66, + [0][1][2][1][RTW89_ACMA][31] = 34, + [0][1][2][1][RTW89_CHILE][31] = 30, + [0][1][2][1][RTW89_UKRAINE][31] = 28, + [0][1][2][1][RTW89_MEXICO][31] = 68, + [0][1][2][1][RTW89_CN][31] = 127, + [0][1][2][1][RTW89_QATAR][31] = 28, + [0][1][2][1][RTW89_FCC][33] = 68, + [0][1][2][1][RTW89_ETSI][33] = 34, + [0][1][2][1][RTW89_MKK][33] = 70, + [0][1][2][1][RTW89_IC][33] = 62, + [0][1][2][1][RTW89_KCC][33] = 66, + [0][1][2][1][RTW89_ACMA][33] = 34, + [0][1][2][1][RTW89_CHILE][33] = 30, + [0][1][2][1][RTW89_UKRAINE][33] = 28, + [0][1][2][1][RTW89_MEXICO][33] = 68, + [0][1][2][1][RTW89_CN][33] = 127, + [0][1][2][1][RTW89_QATAR][33] = 28, + [0][1][2][1][RTW89_FCC][35] = 64, + [0][1][2][1][RTW89_ETSI][35] = 34, + [0][1][2][1][RTW89_MKK][35] = 70, + [0][1][2][1][RTW89_IC][35] = 62, + [0][1][2][1][RTW89_KCC][35] = 66, + [0][1][2][1][RTW89_ACMA][35] = 34, + [0][1][2][1][RTW89_CHILE][35] = 30, + [0][1][2][1][RTW89_UKRAINE][35] = 28, + [0][1][2][1][RTW89_MEXICO][35] = 64, + [0][1][2][1][RTW89_CN][35] = 127, + [0][1][2][1][RTW89_QATAR][35] = 28, + [0][1][2][1][RTW89_FCC][37] = 68, + [0][1][2][1][RTW89_ETSI][37] = 127, + [0][1][2][1][RTW89_MKK][37] = 70, + [0][1][2][1][RTW89_IC][37] = 62, + [0][1][2][1][RTW89_KCC][37] = 66, + [0][1][2][1][RTW89_ACMA][37] = 68, + [0][1][2][1][RTW89_CHILE][37] = 30, + [0][1][2][1][RTW89_UKRAINE][37] = 127, + [0][1][2][1][RTW89_MEXICO][37] = 68, + [0][1][2][1][RTW89_CN][37] = 127, + [0][1][2][1][RTW89_QATAR][37] = 127, + [0][1][2][1][RTW89_FCC][38] = 76, + [0][1][2][1][RTW89_ETSI][38] = 4, + [0][1][2][1][RTW89_MKK][38] = 127, + 
[0][1][2][1][RTW89_IC][38] = 76, + [0][1][2][1][RTW89_KCC][38] = 66, + [0][1][2][1][RTW89_ACMA][38] = 76, + [0][1][2][1][RTW89_CHILE][38] = 30, + [0][1][2][1][RTW89_UKRAINE][38] = 4, + [0][1][2][1][RTW89_MEXICO][38] = 76, + [0][1][2][1][RTW89_CN][38] = 68, + [0][1][2][1][RTW89_QATAR][38] = 4, + [0][1][2][1][RTW89_FCC][40] = 76, + [0][1][2][1][RTW89_ETSI][40] = 4, + [0][1][2][1][RTW89_MKK][40] = 127, + [0][1][2][1][RTW89_IC][40] = 76, + [0][1][2][1][RTW89_KCC][40] = 66, + [0][1][2][1][RTW89_ACMA][40] = 76, + [0][1][2][1][RTW89_CHILE][40] = 30, + [0][1][2][1][RTW89_UKRAINE][40] = 4, + [0][1][2][1][RTW89_MEXICO][40] = 76, + [0][1][2][1][RTW89_CN][40] = 70, + [0][1][2][1][RTW89_QATAR][40] = 4, + [0][1][2][1][RTW89_FCC][42] = 76, + [0][1][2][1][RTW89_ETSI][42] = 4, + [0][1][2][1][RTW89_MKK][42] = 127, + [0][1][2][1][RTW89_IC][42] = 76, + [0][1][2][1][RTW89_KCC][42] = 66, + [0][1][2][1][RTW89_ACMA][42] = 76, + [0][1][2][1][RTW89_CHILE][42] = 30, + [0][1][2][1][RTW89_UKRAINE][42] = 4, + [0][1][2][1][RTW89_MEXICO][42] = 76, + [0][1][2][1][RTW89_CN][42] = 70, + [0][1][2][1][RTW89_QATAR][42] = 4, + [0][1][2][1][RTW89_FCC][44] = 76, + [0][1][2][1][RTW89_ETSI][44] = 4, + [0][1][2][1][RTW89_MKK][44] = 127, + [0][1][2][1][RTW89_IC][44] = 76, + [0][1][2][1][RTW89_KCC][44] = 66, + [0][1][2][1][RTW89_ACMA][44] = 76, + [0][1][2][1][RTW89_CHILE][44] = 30, + [0][1][2][1][RTW89_UKRAINE][44] = 4, + [0][1][2][1][RTW89_MEXICO][44] = 76, + [0][1][2][1][RTW89_CN][44] = 70, + [0][1][2][1][RTW89_QATAR][44] = 4, + [0][1][2][1][RTW89_FCC][46] = 76, + [0][1][2][1][RTW89_ETSI][46] = 4, + [0][1][2][1][RTW89_MKK][46] = 127, + [0][1][2][1][RTW89_IC][46] = 76, + [0][1][2][1][RTW89_KCC][46] = 66, + [0][1][2][1][RTW89_ACMA][46] = 76, + [0][1][2][1][RTW89_CHILE][46] = 30, + [0][1][2][1][RTW89_UKRAINE][46] = 4, + [0][1][2][1][RTW89_MEXICO][46] = 76, + [0][1][2][1][RTW89_CN][46] = 70, + [0][1][2][1][RTW89_QATAR][46] = 4, + [1][0][2][0][RTW89_FCC][1] = 68, + [1][0][2][0][RTW89_ETSI][1] = 64, + [1][0][2][0][RTW89_MKK][1] = 62, + [1][0][2][0][RTW89_IC][1] = 64, + [1][0][2][0][RTW89_KCC][1] = 72, + [1][0][2][0][RTW89_ACMA][1] = 64, + [1][0][2][0][RTW89_CHILE][1] = 30, + [1][0][2][0][RTW89_UKRAINE][1] = 52, + [1][0][2][0][RTW89_MEXICO][1] = 62, + [1][0][2][0][RTW89_CN][1] = 64, + [1][0][2][0][RTW89_QATAR][1] = 64, + [1][0][2][0][RTW89_FCC][5] = 72, + [1][0][2][0][RTW89_ETSI][5] = 64, + [1][0][2][0][RTW89_MKK][5] = 62, + [1][0][2][0][RTW89_IC][5] = 64, + [1][0][2][0][RTW89_KCC][5] = 72, + [1][0][2][0][RTW89_ACMA][5] = 64, + [1][0][2][0][RTW89_CHILE][5] = 30, + [1][0][2][0][RTW89_UKRAINE][5] = 52, + [1][0][2][0][RTW89_MEXICO][5] = 62, + [1][0][2][0][RTW89_CN][5] = 64, + [1][0][2][0][RTW89_QATAR][5] = 64, + [1][0][2][0][RTW89_FCC][9] = 72, + [1][0][2][0][RTW89_ETSI][9] = 64, + [1][0][2][0][RTW89_MKK][9] = 62, + [1][0][2][0][RTW89_IC][9] = 64, + [1][0][2][0][RTW89_KCC][9] = 72, + [1][0][2][0][RTW89_ACMA][9] = 64, + [1][0][2][0][RTW89_CHILE][9] = 54, + [1][0][2][0][RTW89_UKRAINE][9] = 52, + [1][0][2][0][RTW89_MEXICO][9] = 72, + [1][0][2][0][RTW89_CN][9] = 64, + [1][0][2][0][RTW89_QATAR][9] = 64, + [1][0][2][0][RTW89_FCC][13] = 66, + [1][0][2][0][RTW89_ETSI][13] = 64, + [1][0][2][0][RTW89_MKK][13] = 62, + [1][0][2][0][RTW89_IC][13] = 64, + [1][0][2][0][RTW89_KCC][13] = 68, + [1][0][2][0][RTW89_ACMA][13] = 64, + [1][0][2][0][RTW89_CHILE][13] = 54, + [1][0][2][0][RTW89_UKRAINE][13] = 52, + [1][0][2][0][RTW89_MEXICO][13] = 66, + [1][0][2][0][RTW89_CN][13] = 64, + [1][0][2][0][RTW89_QATAR][13] = 64, + [1][0][2][0][RTW89_FCC][16] = 62, + 
[1][0][2][0][RTW89_ETSI][16] = 64, + [1][0][2][0][RTW89_MKK][16] = 72, + [1][0][2][0][RTW89_IC][16] = 62, + [1][0][2][0][RTW89_KCC][16] = 72, + [1][0][2][0][RTW89_ACMA][16] = 64, + [1][0][2][0][RTW89_CHILE][16] = 54, + [1][0][2][0][RTW89_UKRAINE][16] = 52, + [1][0][2][0][RTW89_MEXICO][16] = 62, + [1][0][2][0][RTW89_CN][16] = 127, + [1][0][2][0][RTW89_QATAR][16] = 52, + [1][0][2][0][RTW89_FCC][20] = 72, + [1][0][2][0][RTW89_ETSI][20] = 64, + [1][0][2][0][RTW89_MKK][20] = 72, + [1][0][2][0][RTW89_IC][20] = 72, + [1][0][2][0][RTW89_KCC][20] = 72, + [1][0][2][0][RTW89_ACMA][20] = 64, + [1][0][2][0][RTW89_CHILE][20] = 54, + [1][0][2][0][RTW89_UKRAINE][20] = 52, + [1][0][2][0][RTW89_MEXICO][20] = 72, + [1][0][2][0][RTW89_CN][20] = 127, + [1][0][2][0][RTW89_QATAR][20] = 52, + [1][0][2][0][RTW89_FCC][24] = 72, + [1][0][2][0][RTW89_ETSI][24] = 64, + [1][0][2][0][RTW89_MKK][24] = 72, + [1][0][2][0][RTW89_IC][24] = 127, + [1][0][2][0][RTW89_KCC][24] = 72, + [1][0][2][0][RTW89_ACMA][24] = 127, + [1][0][2][0][RTW89_CHILE][24] = 54, + [1][0][2][0][RTW89_UKRAINE][24] = 52, + [1][0][2][0][RTW89_MEXICO][24] = 72, + [1][0][2][0][RTW89_CN][24] = 127, + [1][0][2][0][RTW89_QATAR][24] = 52, + [1][0][2][0][RTW89_FCC][28] = 72, + [1][0][2][0][RTW89_ETSI][28] = 64, + [1][0][2][0][RTW89_MKK][28] = 72, + [1][0][2][0][RTW89_IC][28] = 127, + [1][0][2][0][RTW89_KCC][28] = 72, + [1][0][2][0][RTW89_ACMA][28] = 127, + [1][0][2][0][RTW89_CHILE][28] = 54, + [1][0][2][0][RTW89_UKRAINE][28] = 52, + [1][0][2][0][RTW89_MEXICO][28] = 72, + [1][0][2][0][RTW89_CN][28] = 127, + [1][0][2][0][RTW89_QATAR][28] = 52, + [1][0][2][0][RTW89_FCC][32] = 72, + [1][0][2][0][RTW89_ETSI][32] = 64, + [1][0][2][0][RTW89_MKK][32] = 72, + [1][0][2][0][RTW89_IC][32] = 72, + [1][0][2][0][RTW89_KCC][32] = 72, + [1][0][2][0][RTW89_ACMA][32] = 64, + [1][0][2][0][RTW89_CHILE][32] = 54, + [1][0][2][0][RTW89_UKRAINE][32] = 52, + [1][0][2][0][RTW89_MEXICO][32] = 72, + [1][0][2][0][RTW89_CN][32] = 127, + [1][0][2][0][RTW89_QATAR][32] = 52, + [1][0][2][0][RTW89_FCC][36] = 72, + [1][0][2][0][RTW89_ETSI][36] = 127, + [1][0][2][0][RTW89_MKK][36] = 72, + [1][0][2][0][RTW89_IC][36] = 72, + [1][0][2][0][RTW89_KCC][36] = 72, + [1][0][2][0][RTW89_ACMA][36] = 72, + [1][0][2][0][RTW89_CHILE][36] = 54, + [1][0][2][0][RTW89_UKRAINE][36] = 127, + [1][0][2][0][RTW89_MEXICO][36] = 72, + [1][0][2][0][RTW89_CN][36] = 127, + [1][0][2][0][RTW89_QATAR][36] = 127, + [1][0][2][0][RTW89_FCC][39] = 72, + [1][0][2][0][RTW89_ETSI][39] = 28, + [1][0][2][0][RTW89_MKK][39] = 127, + [1][0][2][0][RTW89_IC][39] = 72, + [1][0][2][0][RTW89_KCC][39] = 72, + [1][0][2][0][RTW89_ACMA][39] = 72, + [1][0][2][0][RTW89_CHILE][39] = 54, + [1][0][2][0][RTW89_UKRAINE][39] = 28, + [1][0][2][0][RTW89_MEXICO][39] = 72, + [1][0][2][0][RTW89_CN][39] = 68, + [1][0][2][0][RTW89_QATAR][39] = 28, + [1][0][2][0][RTW89_FCC][43] = 72, + [1][0][2][0][RTW89_ETSI][43] = 28, + [1][0][2][0][RTW89_MKK][43] = 127, + [1][0][2][0][RTW89_IC][43] = 72, + [1][0][2][0][RTW89_KCC][43] = 72, + [1][0][2][0][RTW89_ACMA][43] = 72, + [1][0][2][0][RTW89_CHILE][43] = 54, + [1][0][2][0][RTW89_UKRAINE][43] = 28, + [1][0][2][0][RTW89_MEXICO][43] = 72, + [1][0][2][0][RTW89_CN][43] = 72, + [1][0][2][0][RTW89_QATAR][43] = 28, + [1][1][2][0][RTW89_FCC][1] = 58, + [1][1][2][0][RTW89_ETSI][1] = 52, + [1][1][2][0][RTW89_MKK][1] = 50, + [1][1][2][0][RTW89_IC][1] = 52, + [1][1][2][0][RTW89_KCC][1] = 66, + [1][1][2][0][RTW89_ACMA][1] = 52, + [1][1][2][0][RTW89_CHILE][1] = 18, + [1][1][2][0][RTW89_UKRAINE][1] = 40, + [1][1][2][0][RTW89_MEXICO][1] = 
50, + [1][1][2][0][RTW89_CN][1] = 52, + [1][1][2][0][RTW89_QATAR][1] = 52, + [1][1][2][0][RTW89_FCC][5] = 72, + [1][1][2][0][RTW89_ETSI][5] = 52, + [1][1][2][0][RTW89_MKK][5] = 50, + [1][1][2][0][RTW89_IC][5] = 52, + [1][1][2][0][RTW89_KCC][5] = 50, + [1][1][2][0][RTW89_ACMA][5] = 52, + [1][1][2][0][RTW89_CHILE][5] = 18, + [1][1][2][0][RTW89_UKRAINE][5] = 40, + [1][1][2][0][RTW89_MEXICO][5] = 50, + [1][1][2][0][RTW89_CN][5] = 52, + [1][1][2][0][RTW89_QATAR][5] = 52, + [1][1][2][0][RTW89_FCC][9] = 72, + [1][1][2][0][RTW89_ETSI][9] = 52, + [1][1][2][0][RTW89_MKK][9] = 50, + [1][1][2][0][RTW89_IC][9] = 52, + [1][1][2][0][RTW89_KCC][9] = 66, + [1][1][2][0][RTW89_ACMA][9] = 52, + [1][1][2][0][RTW89_CHILE][9] = 42, + [1][1][2][0][RTW89_UKRAINE][9] = 40, + [1][1][2][0][RTW89_MEXICO][9] = 72, + [1][1][2][0][RTW89_CN][9] = 52, + [1][1][2][0][RTW89_QATAR][9] = 52, + [1][1][2][0][RTW89_FCC][13] = 58, + [1][1][2][0][RTW89_ETSI][13] = 52, + [1][1][2][0][RTW89_MKK][13] = 50, + [1][1][2][0][RTW89_IC][13] = 52, + [1][1][2][0][RTW89_KCC][13] = 66, + [1][1][2][0][RTW89_ACMA][13] = 52, + [1][1][2][0][RTW89_CHILE][13] = 42, + [1][1][2][0][RTW89_UKRAINE][13] = 40, + [1][1][2][0][RTW89_MEXICO][13] = 58, + [1][1][2][0][RTW89_CN][13] = 52, + [1][1][2][0][RTW89_QATAR][13] = 52, + [1][1][2][0][RTW89_FCC][16] = 56, + [1][1][2][0][RTW89_ETSI][16] = 52, + [1][1][2][0][RTW89_MKK][16] = 72, + [1][1][2][0][RTW89_IC][16] = 56, + [1][1][2][0][RTW89_KCC][16] = 64, + [1][1][2][0][RTW89_ACMA][16] = 52, + [1][1][2][0][RTW89_CHILE][16] = 42, + [1][1][2][0][RTW89_UKRAINE][16] = 40, + [1][1][2][0][RTW89_MEXICO][16] = 56, + [1][1][2][0][RTW89_CN][16] = 127, + [1][1][2][0][RTW89_QATAR][16] = 40, + [1][1][2][0][RTW89_FCC][20] = 72, + [1][1][2][0][RTW89_ETSI][20] = 52, + [1][1][2][0][RTW89_MKK][20] = 72, + [1][1][2][0][RTW89_IC][20] = 72, + [1][1][2][0][RTW89_KCC][20] = 66, + [1][1][2][0][RTW89_ACMA][20] = 52, + [1][1][2][0][RTW89_CHILE][20] = 42, + [1][1][2][0][RTW89_UKRAINE][20] = 40, + [1][1][2][0][RTW89_MEXICO][20] = 72, + [1][1][2][0][RTW89_CN][20] = 127, + [1][1][2][0][RTW89_QATAR][20] = 40, + [1][1][2][0][RTW89_FCC][24] = 72, + [1][1][2][0][RTW89_ETSI][24] = 52, + [1][1][2][0][RTW89_MKK][24] = 72, + [1][1][2][0][RTW89_IC][24] = 127, + [1][1][2][0][RTW89_KCC][24] = 66, + [1][1][2][0][RTW89_ACMA][24] = 127, + [1][1][2][0][RTW89_CHILE][24] = 42, + [1][1][2][0][RTW89_UKRAINE][24] = 40, + [1][1][2][0][RTW89_MEXICO][24] = 72, + [1][1][2][0][RTW89_CN][24] = 127, + [1][1][2][0][RTW89_QATAR][24] = 40, + [1][1][2][0][RTW89_FCC][28] = 72, + [1][1][2][0][RTW89_ETSI][28] = 52, + [1][1][2][0][RTW89_MKK][28] = 72, + [1][1][2][0][RTW89_IC][28] = 127, + [1][1][2][0][RTW89_KCC][28] = 66, + [1][1][2][0][RTW89_ACMA][28] = 127, + [1][1][2][0][RTW89_CHILE][28] = 42, + [1][1][2][0][RTW89_UKRAINE][28] = 40, + [1][1][2][0][RTW89_MEXICO][28] = 72, + [1][1][2][0][RTW89_CN][28] = 127, + [1][1][2][0][RTW89_QATAR][28] = 40, + [1][1][2][0][RTW89_FCC][32] = 68, + [1][1][2][0][RTW89_ETSI][32] = 52, + [1][1][2][0][RTW89_MKK][32] = 72, + [1][1][2][0][RTW89_IC][32] = 68, + [1][1][2][0][RTW89_KCC][32] = 66, + [1][1][2][0][RTW89_ACMA][32] = 52, + [1][1][2][0][RTW89_CHILE][32] = 42, + [1][1][2][0][RTW89_UKRAINE][32] = 40, + [1][1][2][0][RTW89_MEXICO][32] = 68, + [1][1][2][0][RTW89_CN][32] = 127, + [1][1][2][0][RTW89_QATAR][32] = 40, + [1][1][2][0][RTW89_FCC][36] = 72, + [1][1][2][0][RTW89_ETSI][36] = 127, + [1][1][2][0][RTW89_MKK][36] = 72, + [1][1][2][0][RTW89_IC][36] = 72, + [1][1][2][0][RTW89_KCC][36] = 66, + [1][1][2][0][RTW89_ACMA][36] = 72, + 
[1][1][2][0][RTW89_CHILE][36] = 42, + [1][1][2][0][RTW89_UKRAINE][36] = 127, + [1][1][2][0][RTW89_MEXICO][36] = 72, + [1][1][2][0][RTW89_CN][36] = 127, + [1][1][2][0][RTW89_QATAR][36] = 127, + [1][1][2][0][RTW89_FCC][39] = 72, + [1][1][2][0][RTW89_ETSI][39] = 16, + [1][1][2][0][RTW89_MKK][39] = 127, + [1][1][2][0][RTW89_IC][39] = 72, + [1][1][2][0][RTW89_KCC][39] = 66, + [1][1][2][0][RTW89_ACMA][39] = 72, + [1][1][2][0][RTW89_CHILE][39] = 42, + [1][1][2][0][RTW89_UKRAINE][39] = 16, + [1][1][2][0][RTW89_MEXICO][39] = 72, + [1][1][2][0][RTW89_CN][39] = 68, + [1][1][2][0][RTW89_QATAR][39] = 16, + [1][1][2][0][RTW89_FCC][43] = 72, + [1][1][2][0][RTW89_ETSI][43] = 16, + [1][1][2][0][RTW89_MKK][43] = 127, + [1][1][2][0][RTW89_IC][43] = 72, + [1][1][2][0][RTW89_KCC][43] = 66, + [1][1][2][0][RTW89_ACMA][43] = 72, + [1][1][2][0][RTW89_CHILE][43] = 42, + [1][1][2][0][RTW89_UKRAINE][43] = 16, + [1][1][2][0][RTW89_MEXICO][43] = 72, + [1][1][2][0][RTW89_CN][43] = 72, + [1][1][2][0][RTW89_QATAR][43] = 16, + [1][1][2][1][RTW89_FCC][1] = 58, + [1][1][2][1][RTW89_ETSI][1] = 40, + [1][1][2][1][RTW89_MKK][1] = 50, + [1][1][2][1][RTW89_IC][1] = 40, + [1][1][2][1][RTW89_KCC][1] = 66, + [1][1][2][1][RTW89_ACMA][1] = 40, + [1][1][2][1][RTW89_CHILE][1] = 6, + [1][1][2][1][RTW89_UKRAINE][1] = 28, + [1][1][2][1][RTW89_MEXICO][1] = 50, + [1][1][2][1][RTW89_CN][1] = 40, + [1][1][2][1][RTW89_QATAR][1] = 40, + [1][1][2][1][RTW89_FCC][5] = 68, + [1][1][2][1][RTW89_ETSI][5] = 40, + [1][1][2][1][RTW89_MKK][5] = 50, + [1][1][2][1][RTW89_IC][5] = 40, + [1][1][2][1][RTW89_KCC][5] = 50, + [1][1][2][1][RTW89_ACMA][5] = 40, + [1][1][2][1][RTW89_CHILE][5] = 6, + [1][1][2][1][RTW89_UKRAINE][5] = 28, + [1][1][2][1][RTW89_MEXICO][5] = 50, + [1][1][2][1][RTW89_CN][5] = 40, + [1][1][2][1][RTW89_QATAR][5] = 40, + [1][1][2][1][RTW89_FCC][9] = 68, + [1][1][2][1][RTW89_ETSI][9] = 40, + [1][1][2][1][RTW89_MKK][9] = 50, + [1][1][2][1][RTW89_IC][9] = 40, + [1][1][2][1][RTW89_KCC][9] = 66, + [1][1][2][1][RTW89_ACMA][9] = 40, + [1][1][2][1][RTW89_CHILE][9] = 30, + [1][1][2][1][RTW89_UKRAINE][9] = 28, + [1][1][2][1][RTW89_MEXICO][9] = 68, + [1][1][2][1][RTW89_CN][9] = 40, + [1][1][2][1][RTW89_QATAR][9] = 40, + [1][1][2][1][RTW89_FCC][13] = 58, + [1][1][2][1][RTW89_ETSI][13] = 40, + [1][1][2][1][RTW89_MKK][13] = 50, + [1][1][2][1][RTW89_IC][13] = 40, + [1][1][2][1][RTW89_KCC][13] = 66, + [1][1][2][1][RTW89_ACMA][13] = 40, + [1][1][2][1][RTW89_CHILE][13] = 30, + [1][1][2][1][RTW89_UKRAINE][13] = 28, + [1][1][2][1][RTW89_MEXICO][13] = 58, + [1][1][2][1][RTW89_CN][13] = 40, + [1][1][2][1][RTW89_QATAR][13] = 40, + [1][1][2][1][RTW89_FCC][16] = 56, + [1][1][2][1][RTW89_ETSI][16] = 40, + [1][1][2][1][RTW89_MKK][16] = 72, + [1][1][2][1][RTW89_IC][16] = 56, + [1][1][2][1][RTW89_KCC][16] = 64, + [1][1][2][1][RTW89_ACMA][16] = 40, + [1][1][2][1][RTW89_CHILE][16] = 30, + [1][1][2][1][RTW89_UKRAINE][16] = 28, + [1][1][2][1][RTW89_MEXICO][16] = 56, + [1][1][2][1][RTW89_CN][16] = 127, + [1][1][2][1][RTW89_QATAR][16] = 28, + [1][1][2][1][RTW89_FCC][20] = 68, + [1][1][2][1][RTW89_ETSI][20] = 40, + [1][1][2][1][RTW89_MKK][20] = 72, + [1][1][2][1][RTW89_IC][20] = 68, + [1][1][2][1][RTW89_KCC][20] = 66, + [1][1][2][1][RTW89_ACMA][20] = 40, + [1][1][2][1][RTW89_CHILE][20] = 30, + [1][1][2][1][RTW89_UKRAINE][20] = 28, + [1][1][2][1][RTW89_MEXICO][20] = 68, + [1][1][2][1][RTW89_CN][20] = 127, + [1][1][2][1][RTW89_QATAR][20] = 28, + [1][1][2][1][RTW89_FCC][24] = 68, + [1][1][2][1][RTW89_ETSI][24] = 40, + [1][1][2][1][RTW89_MKK][24] = 72, + [1][1][2][1][RTW89_IC][24] 
= 127, + [1][1][2][1][RTW89_KCC][24] = 66, + [1][1][2][1][RTW89_ACMA][24] = 127, + [1][1][2][1][RTW89_CHILE][24] = 30, + [1][1][2][1][RTW89_UKRAINE][24] = 28, + [1][1][2][1][RTW89_MEXICO][24] = 68, + [1][1][2][1][RTW89_CN][24] = 127, + [1][1][2][1][RTW89_QATAR][24] = 28, + [1][1][2][1][RTW89_FCC][28] = 68, + [1][1][2][1][RTW89_ETSI][28] = 40, + [1][1][2][1][RTW89_MKK][28] = 72, + [1][1][2][1][RTW89_IC][28] = 127, + [1][1][2][1][RTW89_KCC][28] = 66, + [1][1][2][1][RTW89_ACMA][28] = 127, + [1][1][2][1][RTW89_CHILE][28] = 30, + [1][1][2][1][RTW89_UKRAINE][28] = 28, + [1][1][2][1][RTW89_MEXICO][28] = 68, + [1][1][2][1][RTW89_CN][28] = 127, + [1][1][2][1][RTW89_QATAR][28] = 28, + [1][1][2][1][RTW89_FCC][32] = 68, + [1][1][2][1][RTW89_ETSI][32] = 40, + [1][1][2][1][RTW89_MKK][32] = 72, + [1][1][2][1][RTW89_IC][32] = 68, + [1][1][2][1][RTW89_KCC][32] = 66, + [1][1][2][1][RTW89_ACMA][32] = 40, + [1][1][2][1][RTW89_CHILE][32] = 30, + [1][1][2][1][RTW89_UKRAINE][32] = 28, + [1][1][2][1][RTW89_MEXICO][32] = 68, + [1][1][2][1][RTW89_CN][32] = 127, + [1][1][2][1][RTW89_QATAR][32] = 28, + [1][1][2][1][RTW89_FCC][36] = 68, + [1][1][2][1][RTW89_ETSI][36] = 127, + [1][1][2][1][RTW89_MKK][36] = 72, + [1][1][2][1][RTW89_IC][36] = 68, + [1][1][2][1][RTW89_KCC][36] = 66, + [1][1][2][1][RTW89_ACMA][36] = 68, + [1][1][2][1][RTW89_CHILE][36] = 30, + [1][1][2][1][RTW89_UKRAINE][36] = 127, + [1][1][2][1][RTW89_MEXICO][36] = 68, + [1][1][2][1][RTW89_CN][36] = 127, + [1][1][2][1][RTW89_QATAR][36] = 127, + [1][1][2][1][RTW89_FCC][39] = 72, + [1][1][2][1][RTW89_ETSI][39] = 4, + [1][1][2][1][RTW89_MKK][39] = 127, + [1][1][2][1][RTW89_IC][39] = 72, + [1][1][2][1][RTW89_KCC][39] = 66, + [1][1][2][1][RTW89_ACMA][39] = 72, + [1][1][2][1][RTW89_CHILE][39] = 30, + [1][1][2][1][RTW89_UKRAINE][39] = 4, + [1][1][2][1][RTW89_MEXICO][39] = 72, + [1][1][2][1][RTW89_CN][39] = 62, + [1][1][2][1][RTW89_QATAR][39] = 4, + [1][1][2][1][RTW89_FCC][43] = 72, + [1][1][2][1][RTW89_ETSI][43] = 4, + [1][1][2][1][RTW89_MKK][43] = 127, + [1][1][2][1][RTW89_IC][43] = 72, + [1][1][2][1][RTW89_KCC][43] = 66, + [1][1][2][1][RTW89_ACMA][43] = 72, + [1][1][2][1][RTW89_CHILE][43] = 30, + [1][1][2][1][RTW89_UKRAINE][43] = 4, + [1][1][2][1][RTW89_MEXICO][43] = 72, + [1][1][2][1][RTW89_CN][43] = 72, + [1][1][2][1][RTW89_QATAR][43] = 4, + [2][0][2][0][RTW89_FCC][3] = 64, + [2][0][2][0][RTW89_ETSI][3] = 64, + [2][0][2][0][RTW89_MKK][3] = 64, + [2][0][2][0][RTW89_IC][3] = 62, + [2][0][2][0][RTW89_KCC][3] = 72, + [2][0][2][0][RTW89_ACMA][3] = 64, + [2][0][2][0][RTW89_CHILE][3] = 30, + [2][0][2][0][RTW89_UKRAINE][3] = 52, + [2][0][2][0][RTW89_MEXICO][3] = 62, + [2][0][2][0][RTW89_CN][3] = 64, + [2][0][2][0][RTW89_QATAR][3] = 64, + [2][0][2][0][RTW89_FCC][11] = 64, + [2][0][2][0][RTW89_ETSI][11] = 64, + [2][0][2][0][RTW89_MKK][11] = 64, + [2][0][2][0][RTW89_IC][11] = 62, + [2][0][2][0][RTW89_KCC][11] = 72, + [2][0][2][0][RTW89_ACMA][11] = 64, + [2][0][2][0][RTW89_CHILE][11] = 54, + [2][0][2][0][RTW89_UKRAINE][11] = 52, + [2][0][2][0][RTW89_MEXICO][11] = 64, + [2][0][2][0][RTW89_CN][11] = 64, + [2][0][2][0][RTW89_QATAR][11] = 64, + [2][0][2][0][RTW89_FCC][18] = 62, + [2][0][2][0][RTW89_ETSI][18] = 64, + [2][0][2][0][RTW89_MKK][18] = 72, + [2][0][2][0][RTW89_IC][18] = 66, + [2][0][2][0][RTW89_KCC][18] = 70, + [2][0][2][0][RTW89_ACMA][18] = 64, + [2][0][2][0][RTW89_CHILE][18] = 54, + [2][0][2][0][RTW89_UKRAINE][18] = 52, + [2][0][2][0][RTW89_MEXICO][18] = 62, + [2][0][2][0][RTW89_CN][18] = 127, + [2][0][2][0][RTW89_QATAR][18] = 52, + [2][0][2][0][RTW89_FCC][26] = 
72, + [2][0][2][0][RTW89_ETSI][26] = 64, + [2][0][2][0][RTW89_MKK][26] = 72, + [2][0][2][0][RTW89_IC][26] = 127, + [2][0][2][0][RTW89_KCC][26] = 72, + [2][0][2][0][RTW89_ACMA][26] = 127, + [2][0][2][0][RTW89_CHILE][26] = 54, + [2][0][2][0][RTW89_UKRAINE][26] = 52, + [2][0][2][0][RTW89_MEXICO][26] = 72, + [2][0][2][0][RTW89_CN][26] = 127, + [2][0][2][0][RTW89_QATAR][26] = 52, + [2][0][2][0][RTW89_FCC][34] = 72, + [2][0][2][0][RTW89_ETSI][34] = 127, + [2][0][2][0][RTW89_MKK][34] = 72, + [2][0][2][0][RTW89_IC][34] = 72, + [2][0][2][0][RTW89_KCC][34] = 72, + [2][0][2][0][RTW89_ACMA][34] = 72, + [2][0][2][0][RTW89_CHILE][34] = 54, + [2][0][2][0][RTW89_UKRAINE][34] = 127, + [2][0][2][0][RTW89_MEXICO][34] = 72, + [2][0][2][0][RTW89_CN][34] = 127, + [2][0][2][0][RTW89_QATAR][34] = 127, + [2][0][2][0][RTW89_FCC][41] = 72, + [2][0][2][0][RTW89_ETSI][41] = 28, + [2][0][2][0][RTW89_MKK][41] = 127, + [2][0][2][0][RTW89_IC][41] = 72, + [2][0][2][0][RTW89_KCC][41] = 68, + [2][0][2][0][RTW89_ACMA][41] = 72, + [2][0][2][0][RTW89_CHILE][41] = 54, + [2][0][2][0][RTW89_UKRAINE][41] = 28, + [2][0][2][0][RTW89_MEXICO][41] = 72, + [2][0][2][0][RTW89_CN][41] = 68, + [2][0][2][0][RTW89_QATAR][41] = 28, + [2][1][2][0][RTW89_FCC][3] = 56, + [2][1][2][0][RTW89_ETSI][3] = 52, + [2][1][2][0][RTW89_MKK][3] = 52, + [2][1][2][0][RTW89_IC][3] = 52, + [2][1][2][0][RTW89_KCC][3] = 66, + [2][1][2][0][RTW89_ACMA][3] = 52, + [2][1][2][0][RTW89_CHILE][3] = 18, + [2][1][2][0][RTW89_UKRAINE][3] = 40, + [2][1][2][0][RTW89_MEXICO][3] = 50, + [2][1][2][0][RTW89_CN][3] = 52, + [2][1][2][0][RTW89_QATAR][3] = 52, + [2][1][2][0][RTW89_FCC][11] = 56, + [2][1][2][0][RTW89_ETSI][11] = 52, + [2][1][2][0][RTW89_MKK][11] = 52, + [2][1][2][0][RTW89_IC][11] = 52, + [2][1][2][0][RTW89_KCC][11] = 64, + [2][1][2][0][RTW89_ACMA][11] = 52, + [2][1][2][0][RTW89_CHILE][11] = 42, + [2][1][2][0][RTW89_UKRAINE][11] = 40, + [2][1][2][0][RTW89_MEXICO][11] = 56, + [2][1][2][0][RTW89_CN][11] = 52, + [2][1][2][0][RTW89_QATAR][11] = 52, + [2][1][2][0][RTW89_FCC][18] = 56, + [2][1][2][0][RTW89_ETSI][18] = 52, + [2][1][2][0][RTW89_MKK][18] = 72, + [2][1][2][0][RTW89_IC][18] = 56, + [2][1][2][0][RTW89_KCC][18] = 58, + [2][1][2][0][RTW89_ACMA][18] = 52, + [2][1][2][0][RTW89_CHILE][18] = 42, + [2][1][2][0][RTW89_UKRAINE][18] = 40, + [2][1][2][0][RTW89_MEXICO][18] = 56, + [2][1][2][0][RTW89_CN][18] = 127, + [2][1][2][0][RTW89_QATAR][18] = 40, + [2][1][2][0][RTW89_FCC][26] = 72, + [2][1][2][0][RTW89_ETSI][26] = 52, + [2][1][2][0][RTW89_MKK][26] = 72, + [2][1][2][0][RTW89_IC][26] = 127, + [2][1][2][0][RTW89_KCC][26] = 64, + [2][1][2][0][RTW89_ACMA][26] = 127, + [2][1][2][0][RTW89_CHILE][26] = 42, + [2][1][2][0][RTW89_UKRAINE][26] = 40, + [2][1][2][0][RTW89_MEXICO][26] = 72, + [2][1][2][0][RTW89_CN][26] = 127, + [2][1][2][0][RTW89_QATAR][26] = 40, + [2][1][2][0][RTW89_FCC][34] = 72, + [2][1][2][0][RTW89_ETSI][34] = 127, + [2][1][2][0][RTW89_MKK][34] = 72, + [2][1][2][0][RTW89_IC][34] = 72, + [2][1][2][0][RTW89_KCC][34] = 64, + [2][1][2][0][RTW89_ACMA][34] = 72, + [2][1][2][0][RTW89_CHILE][34] = 42, + [2][1][2][0][RTW89_UKRAINE][34] = 127, + [2][1][2][0][RTW89_MEXICO][34] = 72, + [2][1][2][0][RTW89_CN][34] = 127, + [2][1][2][0][RTW89_QATAR][34] = 127, + [2][1][2][0][RTW89_FCC][41] = 72, + [2][1][2][0][RTW89_ETSI][41] = 16, + [2][1][2][0][RTW89_MKK][41] = 127, + [2][1][2][0][RTW89_IC][41] = 72, + [2][1][2][0][RTW89_KCC][41] = 58, + [2][1][2][0][RTW89_ACMA][41] = 72, + [2][1][2][0][RTW89_CHILE][41] = 42, + [2][1][2][0][RTW89_UKRAINE][41] = 16, + 
[2][1][2][0][RTW89_MEXICO][41] = 72, + [2][1][2][0][RTW89_CN][41] = 68, + [2][1][2][0][RTW89_QATAR][41] = 16, + [2][1][2][1][RTW89_FCC][3] = 56, + [2][1][2][1][RTW89_ETSI][3] = 40, + [2][1][2][1][RTW89_MKK][3] = 52, + [2][1][2][1][RTW89_IC][3] = 40, + [2][1][2][1][RTW89_KCC][3] = 66, + [2][1][2][1][RTW89_ACMA][3] = 40, + [2][1][2][1][RTW89_CHILE][3] = 6, + [2][1][2][1][RTW89_UKRAINE][3] = 28, + [2][1][2][1][RTW89_MEXICO][3] = 50, + [2][1][2][1][RTW89_CN][3] = 40, + [2][1][2][1][RTW89_QATAR][3] = 40, + [2][1][2][1][RTW89_FCC][11] = 56, + [2][1][2][1][RTW89_ETSI][11] = 40, + [2][1][2][1][RTW89_MKK][11] = 52, + [2][1][2][1][RTW89_IC][11] = 40, + [2][1][2][1][RTW89_KCC][11] = 64, + [2][1][2][1][RTW89_ACMA][11] = 40, + [2][1][2][1][RTW89_CHILE][11] = 30, + [2][1][2][1][RTW89_UKRAINE][11] = 28, + [2][1][2][1][RTW89_MEXICO][11] = 56, + [2][1][2][1][RTW89_CN][11] = 40, + [2][1][2][1][RTW89_QATAR][11] = 40, + [2][1][2][1][RTW89_FCC][18] = 56, + [2][1][2][1][RTW89_ETSI][18] = 40, + [2][1][2][1][RTW89_MKK][18] = 72, + [2][1][2][1][RTW89_IC][18] = 56, + [2][1][2][1][RTW89_KCC][18] = 58, + [2][1][2][1][RTW89_ACMA][18] = 40, + [2][1][2][1][RTW89_CHILE][18] = 30, + [2][1][2][1][RTW89_UKRAINE][18] = 28, + [2][1][2][1][RTW89_MEXICO][18] = 56, + [2][1][2][1][RTW89_CN][18] = 127, + [2][1][2][1][RTW89_QATAR][18] = 28, + [2][1][2][1][RTW89_FCC][26] = 68, + [2][1][2][1][RTW89_ETSI][26] = 40, + [2][1][2][1][RTW89_MKK][26] = 72, + [2][1][2][1][RTW89_IC][26] = 127, + [2][1][2][1][RTW89_KCC][26] = 64, + [2][1][2][1][RTW89_ACMA][26] = 127, + [2][1][2][1][RTW89_CHILE][26] = 30, + [2][1][2][1][RTW89_UKRAINE][26] = 28, + [2][1][2][1][RTW89_MEXICO][26] = 68, + [2][1][2][1][RTW89_CN][26] = 127, + [2][1][2][1][RTW89_QATAR][26] = 28, + [2][1][2][1][RTW89_FCC][34] = 68, + [2][1][2][1][RTW89_ETSI][34] = 127, + [2][1][2][1][RTW89_MKK][34] = 72, + [2][1][2][1][RTW89_IC][34] = 68, + [2][1][2][1][RTW89_KCC][34] = 64, + [2][1][2][1][RTW89_ACMA][34] = 68, + [2][1][2][1][RTW89_CHILE][34] = 30, + [2][1][2][1][RTW89_UKRAINE][34] = 127, + [2][1][2][1][RTW89_MEXICO][34] = 68, + [2][1][2][1][RTW89_CN][34] = 127, + [2][1][2][1][RTW89_QATAR][34] = 127, + [2][1][2][1][RTW89_FCC][41] = 72, + [2][1][2][1][RTW89_ETSI][41] = 4, + [2][1][2][1][RTW89_MKK][41] = 127, + [2][1][2][1][RTW89_IC][41] = 72, + [2][1][2][1][RTW89_KCC][41] = 58, + [2][1][2][1][RTW89_ACMA][41] = 72, + [2][1][2][1][RTW89_CHILE][41] = 30, + [2][1][2][1][RTW89_UKRAINE][41] = 4, + [2][1][2][1][RTW89_MEXICO][41] = 72, + [2][1][2][1][RTW89_CN][41] = 64, + [2][1][2][1][RTW89_QATAR][41] = 4, }; const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { - [0][0][0][0] = 32, - [0][0][0][1] = 32, - [0][0][0][2] = 32, - [0][0][0][3] = 32, - [0][0][0][4] = 32, - [0][0][0][5] = 32, - [0][0][0][6] = 32, - [0][0][0][7] = 32, - [0][0][0][8] = 32, - [0][0][0][9] = 32, - [0][0][0][10] = 32, - [0][0][0][11] = 32, - [0][0][0][12] = 32, - [0][0][0][13] = 0, - [0][1][0][0] = 20, - [0][1][0][1] = 20, - [0][1][0][2] = 20, - [0][1][0][3] = 20, - [0][1][0][4] = 20, - [0][1][0][5] = 20, - [0][1][0][6] = 20, - [0][1][0][7] = 20, - [0][1][0][8] = 20, - [0][1][0][9] = 20, - [0][1][0][10] = 20, - [0][1][0][11] = 20, - [0][1][0][12] = 20, - [0][1][0][13] = 0, - [1][0][0][0] = 42, - [1][0][0][1] = 42, - [1][0][0][2] = 42, - [1][0][0][3] = 42, - [1][0][0][4] = 42, - [1][0][0][5] = 42, - [1][0][0][6] = 42, - [1][0][0][7] = 42, - [1][0][0][8] = 42, - [1][0][0][9] = 42, - [1][0][0][10] = 42, - [1][0][0][11] = 42, - [1][0][0][12] = 36, - [1][0][0][13] = 0, - 
[1][1][0][0] = 30, - [1][1][0][1] = 30, - [1][1][0][2] = 30, - [1][1][0][3] = 30, - [1][1][0][4] = 30, - [1][1][0][5] = 30, - [1][1][0][6] = 30, - [1][1][0][7] = 30, - [1][1][0][8] = 30, - [1][1][0][9] = 30, - [1][1][0][10] = 30, - [1][1][0][11] = 30, - [1][1][0][12] = 30, - [1][1][0][13] = 0, - [2][0][0][0] = 52, - [2][0][0][1] = 52, - [2][0][0][2] = 52, - [2][0][0][3] = 52, - [2][0][0][4] = 52, - [2][0][0][5] = 52, - [2][0][0][6] = 52, - [2][0][0][7] = 52, - [2][0][0][8] = 52, - [2][0][0][9] = 52, - [2][0][0][10] = 52, - [2][0][0][11] = 52, - [2][0][0][12] = 40, - [2][0][0][13] = 0, - [2][1][0][0] = 40, - [2][1][0][1] = 40, - [2][1][0][2] = 40, - [2][1][0][3] = 40, - [2][1][0][4] = 40, - [2][1][0][5] = 40, - [2][1][0][6] = 40, - [2][1][0][7] = 40, - [2][1][0][8] = 40, - [2][1][0][9] = 40, - [2][1][0][10] = 40, - [2][1][0][11] = 40, - [2][1][0][12] = 26, - [2][1][0][13] = 0, - [0][0][2][0] = 70, - [0][0][1][0] = 32, - [0][0][3][0] = 40, - [0][0][5][0] = 70, - [0][0][6][0] = 32, - [0][0][9][0] = 32, - [0][0][8][0] = 60, - [0][0][11][0] = 32, - [0][0][2][1] = 70, - [0][0][1][1] = 32, - [0][0][3][1] = 40, - [0][0][5][1] = 70, - [0][0][6][1] = 32, - [0][0][9][1] = 32, - [0][0][8][1] = 60, - [0][0][11][1] = 32, - [0][0][2][2] = 74, - [0][0][1][2] = 32, - [0][0][3][2] = 40, - [0][0][5][2] = 74, - [0][0][6][2] = 32, - [0][0][9][2] = 32, - [0][0][8][2] = 60, - [0][0][11][2] = 32, - [0][0][2][3] = 78, - [0][0][1][3] = 32, - [0][0][3][3] = 40, - [0][0][5][3] = 78, - [0][0][6][3] = 32, - [0][0][9][3] = 32, - [0][0][8][3] = 60, - [0][0][11][3] = 32, - [0][0][2][4] = 78, - [0][0][1][4] = 32, - [0][0][3][4] = 40, - [0][0][5][4] = 78, - [0][0][6][4] = 32, - [0][0][9][4] = 32, - [0][0][8][4] = 60, - [0][0][11][4] = 32, - [0][0][2][5] = 78, - [0][0][1][5] = 32, - [0][0][3][5] = 40, - [0][0][5][5] = 78, - [0][0][6][5] = 32, - [0][0][9][5] = 32, - [0][0][8][5] = 60, - [0][0][11][5] = 32, - [0][0][2][6] = 78, - [0][0][1][6] = 32, - [0][0][3][6] = 40, - [0][0][5][6] = 78, - [0][0][6][6] = 32, - [0][0][9][6] = 32, - [0][0][8][6] = 60, - [0][0][11][6] = 32, - [0][0][2][7] = 78, - [0][0][1][7] = 32, - [0][0][3][7] = 40, - [0][0][5][7] = 78, - [0][0][6][7] = 32, - [0][0][9][7] = 32, - [0][0][8][7] = 60, - [0][0][11][7] = 32, - [0][0][2][8] = 74, - [0][0][1][8] = 32, - [0][0][3][8] = 40, - [0][0][5][8] = 74, - [0][0][6][8] = 32, - [0][0][9][8] = 32, - [0][0][8][8] = 60, - [0][0][11][8] = 32, - [0][0][2][9] = 70, - [0][0][1][9] = 32, - [0][0][3][9] = 40, - [0][0][5][9] = 70, - [0][0][6][9] = 32, - [0][0][9][9] = 32, - [0][0][8][9] = 60, - [0][0][11][9] = 32, - [0][0][2][10] = 70, - [0][0][1][10] = 32, - [0][0][3][10] = 40, - [0][0][5][10] = 70, - [0][0][6][10] = 32, - [0][0][9][10] = 32, - [0][0][8][10] = 60, - [0][0][11][10] = 32, - [0][0][2][11] = 58, - [0][0][1][11] = 32, - [0][0][3][11] = 40, - [0][0][5][11] = 58, - [0][0][6][11] = 32, - [0][0][9][11] = 32, - [0][0][8][11] = 60, - [0][0][11][11] = 32, - [0][0][2][12] = 34, - [0][0][1][12] = 32, - [0][0][3][12] = 40, - [0][0][5][12] = 34, - [0][0][6][12] = 32, - [0][0][9][12] = 32, - [0][0][8][12] = 60, - [0][0][11][12] = 32, - [0][0][2][13] = 127, - [0][0][1][13] = 127, - [0][0][3][13] = 127, - [0][0][5][13] = 127, - [0][0][6][13] = 127, - [0][0][9][13] = 127, - [0][0][8][13] = 127, - [0][0][11][13] = 127, - [0][1][2][0] = 64, - [0][1][1][0] = 20, - [0][1][3][0] = 28, - [0][1][5][0] = 64, - [0][1][6][0] = 20, - [0][1][9][0] = 20, - [0][1][8][0] = 48, - [0][1][11][0] = 20, - [0][1][2][1] = 64, - [0][1][1][1] = 20, - [0][1][3][1] = 28, - [0][1][5][1] = 64, - 
[0][1][6][1] = 20, - [0][1][9][1] = 20, - [0][1][8][1] = 48, - [0][1][11][1] = 20, - [0][1][2][2] = 68, - [0][1][1][2] = 20, - [0][1][3][2] = 28, - [0][1][5][2] = 68, - [0][1][6][2] = 20, - [0][1][9][2] = 20, - [0][1][8][2] = 48, - [0][1][11][2] = 20, - [0][1][2][3] = 72, - [0][1][1][3] = 20, - [0][1][3][3] = 28, - [0][1][5][3] = 72, - [0][1][6][3] = 20, - [0][1][9][3] = 20, - [0][1][8][3] = 48, - [0][1][11][3] = 20, - [0][1][2][4] = 76, - [0][1][1][4] = 20, - [0][1][3][4] = 28, - [0][1][5][4] = 76, - [0][1][6][4] = 20, - [0][1][9][4] = 20, - [0][1][8][4] = 48, - [0][1][11][4] = 20, - [0][1][2][5] = 78, - [0][1][1][5] = 20, - [0][1][3][5] = 28, - [0][1][5][5] = 78, - [0][1][6][5] = 20, - [0][1][9][5] = 20, - [0][1][8][5] = 48, - [0][1][11][5] = 20, - [0][1][2][6] = 76, - [0][1][1][6] = 20, - [0][1][3][6] = 28, - [0][1][5][6] = 76, - [0][1][6][6] = 20, - [0][1][9][6] = 20, - [0][1][8][6] = 48, - [0][1][11][6] = 20, - [0][1][2][7] = 72, - [0][1][1][7] = 20, - [0][1][3][7] = 28, - [0][1][5][7] = 72, - [0][1][6][7] = 20, - [0][1][9][7] = 20, - [0][1][8][7] = 48, - [0][1][11][7] = 20, - [0][1][2][8] = 68, - [0][1][1][8] = 20, - [0][1][3][8] = 28, - [0][1][5][8] = 68, - [0][1][6][8] = 20, - [0][1][9][8] = 20, - [0][1][8][8] = 48, - [0][1][11][8] = 20, - [0][1][2][9] = 64, - [0][1][1][9] = 20, - [0][1][3][9] = 28, - [0][1][5][9] = 64, - [0][1][6][9] = 20, - [0][1][9][9] = 20, - [0][1][8][9] = 48, - [0][1][11][9] = 20, - [0][1][2][10] = 64, - [0][1][1][10] = 20, - [0][1][3][10] = 28, - [0][1][5][10] = 64, - [0][1][6][10] = 20, - [0][1][9][10] = 20, - [0][1][8][10] = 48, - [0][1][11][10] = 20, - [0][1][2][11] = 54, - [0][1][1][11] = 20, - [0][1][3][11] = 28, - [0][1][5][11] = 54, - [0][1][6][11] = 20, - [0][1][9][11] = 20, - [0][1][8][11] = 48, - [0][1][11][11] = 20, - [0][1][2][12] = 32, - [0][1][1][12] = 20, - [0][1][3][12] = 28, - [0][1][5][12] = 32, - [0][1][6][12] = 20, - [0][1][9][12] = 20, - [0][1][8][12] = 48, - [0][1][11][12] = 20, - [0][1][2][13] = 127, - [0][1][1][13] = 127, - [0][1][3][13] = 127, - [0][1][5][13] = 127, - [0][1][6][13] = 127, - [0][1][9][13] = 127, - [0][1][8][13] = 127, - [0][1][11][13] = 127, - [1][0][2][0] = 72, - [1][0][1][0] = 42, - [1][0][3][0] = 50, - [1][0][5][0] = 72, - [1][0][6][0] = 42, - [1][0][9][0] = 42, - [1][0][8][0] = 60, - [1][0][11][0] = 42, - [1][0][2][1] = 72, - [1][0][1][1] = 42, - [1][0][3][1] = 50, - [1][0][5][1] = 72, - [1][0][6][1] = 42, - [1][0][9][1] = 42, - [1][0][8][1] = 60, - [1][0][11][1] = 42, - [1][0][2][2] = 76, - [1][0][1][2] = 42, - [1][0][3][2] = 50, - [1][0][5][2] = 76, - [1][0][6][2] = 42, - [1][0][9][2] = 42, - [1][0][8][2] = 60, - [1][0][11][2] = 42, - [1][0][2][3] = 78, - [1][0][1][3] = 42, - [1][0][3][3] = 50, - [1][0][5][3] = 78, - [1][0][6][3] = 42, - [1][0][9][3] = 42, - [1][0][8][3] = 60, - [1][0][11][3] = 42, - [1][0][2][4] = 78, - [1][0][1][4] = 42, - [1][0][3][4] = 50, - [1][0][5][4] = 78, - [1][0][6][4] = 42, - [1][0][9][4] = 42, - [1][0][8][4] = 60, - [1][0][11][4] = 42, - [1][0][2][5] = 78, - [1][0][1][5] = 42, - [1][0][3][5] = 50, - [1][0][5][5] = 78, - [1][0][6][5] = 42, - [1][0][9][5] = 42, - [1][0][8][5] = 60, - [1][0][11][5] = 42, - [1][0][2][6] = 78, - [1][0][1][6] = 42, - [1][0][3][6] = 50, - [1][0][5][6] = 78, - [1][0][6][6] = 42, - [1][0][9][6] = 42, - [1][0][8][6] = 60, - [1][0][11][6] = 42, - [1][0][2][7] = 78, - [1][0][1][7] = 42, - [1][0][3][7] = 50, - [1][0][5][7] = 78, - [1][0][6][7] = 42, - [1][0][9][7] = 42, - [1][0][8][7] = 60, - [1][0][11][7] = 42, - [1][0][2][8] = 78, - [1][0][1][8] = 42, - 
[1][0][3][8] = 50, - [1][0][5][8] = 78, - [1][0][6][8] = 42, - [1][0][9][8] = 42, - [1][0][8][8] = 60, - [1][0][11][8] = 42, - [1][0][2][9] = 74, - [1][0][1][9] = 42, - [1][0][3][9] = 50, - [1][0][5][9] = 74, - [1][0][6][9] = 42, - [1][0][9][9] = 42, - [1][0][8][9] = 60, - [1][0][11][9] = 42, - [1][0][2][10] = 74, - [1][0][1][10] = 42, - [1][0][3][10] = 50, - [1][0][5][10] = 74, - [1][0][6][10] = 42, - [1][0][9][10] = 42, - [1][0][8][10] = 60, - [1][0][11][10] = 42, - [1][0][2][11] = 64, - [1][0][1][11] = 42, - [1][0][3][11] = 50, - [1][0][5][11] = 64, - [1][0][6][11] = 42, - [1][0][9][11] = 42, - [1][0][8][11] = 60, - [1][0][11][11] = 42, - [1][0][2][12] = 36, - [1][0][1][12] = 42, - [1][0][3][12] = 50, - [1][0][5][12] = 36, - [1][0][6][12] = 42, - [1][0][9][12] = 42, - [1][0][8][12] = 60, - [1][0][11][12] = 42, - [1][0][2][13] = 127, - [1][0][1][13] = 127, - [1][0][3][13] = 127, - [1][0][5][13] = 127, - [1][0][6][13] = 127, - [1][0][9][13] = 127, - [1][0][8][13] = 127, - [1][0][11][13] = 127, - [1][1][2][0] = 66, - [1][1][1][0] = 30, - [1][1][3][0] = 38, - [1][1][5][0] = 66, - [1][1][6][0] = 30, - [1][1][9][0] = 30, - [1][1][8][0] = 48, - [1][1][11][0] = 30, - [1][1][2][1] = 66, - [1][1][1][1] = 30, - [1][1][3][1] = 38, - [1][1][5][1] = 66, - [1][1][6][1] = 30, - [1][1][9][1] = 30, - [1][1][8][1] = 48, - [1][1][11][1] = 30, - [1][1][2][2] = 70, - [1][1][1][2] = 30, - [1][1][3][2] = 38, - [1][1][5][2] = 70, - [1][1][6][2] = 30, - [1][1][9][2] = 30, - [1][1][8][2] = 48, - [1][1][11][2] = 30, - [1][1][2][3] = 74, - [1][1][1][3] = 30, - [1][1][3][3] = 38, - [1][1][5][3] = 74, - [1][1][6][3] = 30, - [1][1][9][3] = 30, - [1][1][8][3] = 48, - [1][1][11][3] = 30, - [1][1][2][4] = 78, - [1][1][1][4] = 30, - [1][1][3][4] = 38, - [1][1][5][4] = 78, - [1][1][6][4] = 30, - [1][1][9][4] = 30, - [1][1][8][4] = 48, - [1][1][11][4] = 30, - [1][1][2][5] = 78, - [1][1][1][5] = 30, - [1][1][3][5] = 38, - [1][1][5][5] = 78, - [1][1][6][5] = 30, - [1][1][9][5] = 30, - [1][1][8][5] = 48, - [1][1][11][5] = 30, - [1][1][2][6] = 78, - [1][1][1][6] = 30, - [1][1][3][6] = 38, - [1][1][5][6] = 78, - [1][1][6][6] = 30, - [1][1][9][6] = 30, - [1][1][8][6] = 48, - [1][1][11][6] = 30, - [1][1][2][7] = 74, - [1][1][1][7] = 30, - [1][1][3][7] = 38, - [1][1][5][7] = 74, - [1][1][6][7] = 30, - [1][1][9][7] = 30, - [1][1][8][7] = 48, - [1][1][11][7] = 30, - [1][1][2][8] = 70, - [1][1][1][8] = 30, - [1][1][3][8] = 38, - [1][1][5][8] = 70, - [1][1][6][8] = 30, - [1][1][9][8] = 30, - [1][1][8][8] = 48, - [1][1][11][8] = 30, - [1][1][2][9] = 66, - [1][1][1][9] = 30, - [1][1][3][9] = 38, - [1][1][5][9] = 66, - [1][1][6][9] = 30, - [1][1][9][9] = 30, - [1][1][8][9] = 48, - [1][1][11][9] = 30, - [1][1][2][10] = 66, - [1][1][1][10] = 30, - [1][1][3][10] = 38, - [1][1][5][10] = 66, - [1][1][6][10] = 30, - [1][1][9][10] = 30, - [1][1][8][10] = 48, - [1][1][11][10] = 30, - [1][1][2][11] = 60, - [1][1][1][11] = 30, - [1][1][3][11] = 38, - [1][1][5][11] = 60, - [1][1][6][11] = 30, - [1][1][9][11] = 30, - [1][1][8][11] = 48, - [1][1][11][11] = 30, - [1][1][2][12] = 32, - [1][1][1][12] = 30, - [1][1][3][12] = 38, - [1][1][5][12] = 32, - [1][1][6][12] = 30, - [1][1][9][12] = 30, - [1][1][8][12] = 48, - [1][1][11][12] = 30, - [1][1][2][13] = 127, - [1][1][1][13] = 127, - [1][1][3][13] = 127, - [1][1][5][13] = 127, - [1][1][6][13] = 127, - [1][1][9][13] = 127, - [1][1][8][13] = 127, - [1][1][11][13] = 127, - [2][0][2][0] = 76, - [2][0][1][0] = 52, - [2][0][3][0] = 64, - [2][0][5][0] = 76, - [2][0][6][0] = 52, - [2][0][9][0] = 52, - 
[2][0][8][0] = 60, - [2][0][11][0] = 52, - [2][0][2][1] = 76, - [2][0][1][1] = 52, - [2][0][3][1] = 64, - [2][0][5][1] = 76, - [2][0][6][1] = 52, - [2][0][9][1] = 52, - [2][0][8][1] = 60, - [2][0][11][1] = 52, - [2][0][2][2] = 78, - [2][0][1][2] = 52, - [2][0][3][2] = 64, - [2][0][5][2] = 78, - [2][0][6][2] = 52, - [2][0][9][2] = 52, - [2][0][8][2] = 60, - [2][0][11][2] = 52, - [2][0][2][3] = 78, - [2][0][1][3] = 52, - [2][0][3][3] = 64, - [2][0][5][3] = 78, - [2][0][6][3] = 52, - [2][0][9][3] = 52, - [2][0][8][3] = 60, - [2][0][11][3] = 52, - [2][0][2][4] = 78, - [2][0][1][4] = 52, - [2][0][3][4] = 64, - [2][0][5][4] = 78, - [2][0][6][4] = 52, - [2][0][9][4] = 52, - [2][0][8][4] = 60, - [2][0][11][4] = 52, - [2][0][2][5] = 78, - [2][0][1][5] = 52, - [2][0][3][5] = 64, - [2][0][5][5] = 78, - [2][0][6][5] = 52, - [2][0][9][5] = 52, - [2][0][8][5] = 60, - [2][0][11][5] = 52, - [2][0][2][6] = 78, - [2][0][1][6] = 52, - [2][0][3][6] = 64, - [2][0][5][6] = 78, - [2][0][6][6] = 52, - [2][0][9][6] = 52, - [2][0][8][6] = 60, - [2][0][11][6] = 52, - [2][0][2][7] = 78, - [2][0][1][7] = 52, - [2][0][3][7] = 64, - [2][0][5][7] = 78, - [2][0][6][7] = 52, - [2][0][9][7] = 52, - [2][0][8][7] = 60, - [2][0][11][7] = 52, - [2][0][2][8] = 78, - [2][0][1][8] = 52, - [2][0][3][8] = 64, - [2][0][5][8] = 78, - [2][0][6][8] = 52, - [2][0][9][8] = 52, - [2][0][8][8] = 60, - [2][0][11][8] = 52, - [2][0][2][9] = 76, - [2][0][1][9] = 52, - [2][0][3][9] = 64, - [2][0][5][9] = 76, - [2][0][6][9] = 52, - [2][0][9][9] = 52, - [2][0][8][9] = 60, - [2][0][11][9] = 52, - [2][0][2][10] = 76, - [2][0][1][10] = 52, - [2][0][3][10] = 64, - [2][0][5][10] = 76, - [2][0][6][10] = 52, - [2][0][9][10] = 52, - [2][0][8][10] = 60, - [2][0][11][10] = 52, - [2][0][2][11] = 68, - [2][0][1][11] = 52, - [2][0][3][11] = 64, - [2][0][5][11] = 68, - [2][0][6][11] = 52, - [2][0][9][11] = 52, - [2][0][8][11] = 60, - [2][0][11][11] = 52, - [2][0][2][12] = 40, - [2][0][1][12] = 52, - [2][0][3][12] = 64, - [2][0][5][12] = 40, - [2][0][6][12] = 52, - [2][0][9][12] = 52, - [2][0][8][12] = 60, - [2][0][11][12] = 52, - [2][0][2][13] = 127, - [2][0][1][13] = 127, - [2][0][3][13] = 127, - [2][0][5][13] = 127, - [2][0][6][13] = 127, - [2][0][9][13] = 127, - [2][0][8][13] = 127, - [2][0][11][13] = 127, - [2][1][2][0] = 68, - [2][1][1][0] = 40, - [2][1][3][0] = 52, - [2][1][5][0] = 68, - [2][1][6][0] = 40, - [2][1][9][0] = 40, - [2][1][8][0] = 48, - [2][1][11][0] = 40, - [2][1][2][1] = 68, - [2][1][1][1] = 40, - [2][1][3][1] = 52, - [2][1][5][1] = 68, - [2][1][6][1] = 40, - [2][1][9][1] = 40, - [2][1][8][1] = 48, - [2][1][11][1] = 40, - [2][1][2][2] = 72, - [2][1][1][2] = 40, - [2][1][3][2] = 52, - [2][1][5][2] = 72, - [2][1][6][2] = 40, - [2][1][9][2] = 40, - [2][1][8][2] = 48, - [2][1][11][2] = 40, - [2][1][2][3] = 76, - [2][1][1][3] = 40, - [2][1][3][3] = 52, - [2][1][5][3] = 76, - [2][1][6][3] = 40, - [2][1][9][3] = 40, - [2][1][8][3] = 48, - [2][1][11][3] = 40, - [2][1][2][4] = 78, - [2][1][1][4] = 40, - [2][1][3][4] = 52, - [2][1][5][4] = 78, - [2][1][6][4] = 40, - [2][1][9][4] = 40, - [2][1][8][4] = 48, - [2][1][11][4] = 40, - [2][1][2][5] = 78, - [2][1][1][5] = 40, - [2][1][3][5] = 52, - [2][1][5][5] = 78, - [2][1][6][5] = 40, - [2][1][9][5] = 40, - [2][1][8][5] = 48, - [2][1][11][5] = 40, - [2][1][2][6] = 78, - [2][1][1][6] = 40, - [2][1][3][6] = 52, - [2][1][5][6] = 78, - [2][1][6][6] = 40, - [2][1][9][6] = 40, - [2][1][8][6] = 48, - [2][1][11][6] = 40, - [2][1][2][7] = 78, - [2][1][1][7] = 40, - [2][1][3][7] = 52, - [2][1][5][7] = 78, - 
[2][1][6][7] = 40, - [2][1][9][7] = 40, - [2][1][8][7] = 48, - [2][1][11][7] = 40, - [2][1][2][8] = 74, - [2][1][1][8] = 40, - [2][1][3][8] = 52, - [2][1][5][8] = 74, - [2][1][6][8] = 40, - [2][1][9][8] = 40, - [2][1][8][8] = 48, - [2][1][11][8] = 40, - [2][1][2][9] = 70, - [2][1][1][9] = 40, - [2][1][3][9] = 52, - [2][1][5][9] = 70, - [2][1][6][9] = 40, - [2][1][9][9] = 40, - [2][1][8][9] = 48, - [2][1][11][9] = 40, - [2][1][2][10] = 70, - [2][1][1][10] = 40, - [2][1][3][10] = 52, - [2][1][5][10] = 70, - [2][1][6][10] = 40, - [2][1][9][10] = 40, - [2][1][8][10] = 48, - [2][1][11][10] = 40, - [2][1][2][11] = 48, - [2][1][1][11] = 40, - [2][1][3][11] = 52, - [2][1][5][11] = 48, - [2][1][6][11] = 40, - [2][1][9][11] = 40, - [2][1][8][11] = 48, - [2][1][11][11] = 40, - [2][1][2][12] = 26, - [2][1][1][12] = 40, - [2][1][3][12] = 52, - [2][1][5][12] = 26, - [2][1][6][12] = 40, - [2][1][9][12] = 40, - [2][1][8][12] = 48, - [2][1][11][12] = 40, - [2][1][2][13] = 127, - [2][1][1][13] = 127, - [2][1][3][13] = 127, - [2][1][5][13] = 127, - [2][1][6][13] = 127, - [2][1][9][13] = 127, - [2][1][8][13] = 127, - [2][1][11][13] = 127, + [0][0][RTW89_WW][0] = 32, + [0][0][RTW89_WW][1] = 32, + [0][0][RTW89_WW][2] = 32, + [0][0][RTW89_WW][3] = 32, + [0][0][RTW89_WW][4] = 32, + [0][0][RTW89_WW][5] = 32, + [0][0][RTW89_WW][6] = 32, + [0][0][RTW89_WW][7] = 32, + [0][0][RTW89_WW][8] = 32, + [0][0][RTW89_WW][9] = 32, + [0][0][RTW89_WW][10] = 32, + [0][0][RTW89_WW][11] = 32, + [0][0][RTW89_WW][12] = 32, + [0][0][RTW89_WW][13] = 0, + [0][1][RTW89_WW][0] = 20, + [0][1][RTW89_WW][1] = 20, + [0][1][RTW89_WW][2] = 20, + [0][1][RTW89_WW][3] = 20, + [0][1][RTW89_WW][4] = 20, + [0][1][RTW89_WW][5] = 20, + [0][1][RTW89_WW][6] = 20, + [0][1][RTW89_WW][7] = 20, + [0][1][RTW89_WW][8] = 20, + [0][1][RTW89_WW][9] = 20, + [0][1][RTW89_WW][10] = 20, + [0][1][RTW89_WW][11] = 20, + [0][1][RTW89_WW][12] = 20, + [0][1][RTW89_WW][13] = 0, + [1][0][RTW89_WW][0] = 42, + [1][0][RTW89_WW][1] = 42, + [1][0][RTW89_WW][2] = 42, + [1][0][RTW89_WW][3] = 42, + [1][0][RTW89_WW][4] = 42, + [1][0][RTW89_WW][5] = 42, + [1][0][RTW89_WW][6] = 42, + [1][0][RTW89_WW][7] = 42, + [1][0][RTW89_WW][8] = 42, + [1][0][RTW89_WW][9] = 42, + [1][0][RTW89_WW][10] = 42, + [1][0][RTW89_WW][11] = 42, + [1][0][RTW89_WW][12] = 36, + [1][0][RTW89_WW][13] = 0, + [1][1][RTW89_WW][0] = 30, + [1][1][RTW89_WW][1] = 30, + [1][1][RTW89_WW][2] = 30, + [1][1][RTW89_WW][3] = 30, + [1][1][RTW89_WW][4] = 30, + [1][1][RTW89_WW][5] = 30, + [1][1][RTW89_WW][6] = 30, + [1][1][RTW89_WW][7] = 30, + [1][1][RTW89_WW][8] = 30, + [1][1][RTW89_WW][9] = 30, + [1][1][RTW89_WW][10] = 30, + [1][1][RTW89_WW][11] = 30, + [1][1][RTW89_WW][12] = 30, + [1][1][RTW89_WW][13] = 0, + [2][0][RTW89_WW][0] = 52, + [2][0][RTW89_WW][1] = 52, + [2][0][RTW89_WW][2] = 52, + [2][0][RTW89_WW][3] = 52, + [2][0][RTW89_WW][4] = 52, + [2][0][RTW89_WW][5] = 52, + [2][0][RTW89_WW][6] = 52, + [2][0][RTW89_WW][7] = 52, + [2][0][RTW89_WW][8] = 52, + [2][0][RTW89_WW][9] = 52, + [2][0][RTW89_WW][10] = 52, + [2][0][RTW89_WW][11] = 52, + [2][0][RTW89_WW][12] = 40, + [2][0][RTW89_WW][13] = 0, + [2][1][RTW89_WW][0] = 40, + [2][1][RTW89_WW][1] = 40, + [2][1][RTW89_WW][2] = 40, + [2][1][RTW89_WW][3] = 40, + [2][1][RTW89_WW][4] = 40, + [2][1][RTW89_WW][5] = 40, + [2][1][RTW89_WW][6] = 40, + [2][1][RTW89_WW][7] = 40, + [2][1][RTW89_WW][8] = 40, + [2][1][RTW89_WW][9] = 40, + [2][1][RTW89_WW][10] = 40, + [2][1][RTW89_WW][11] = 40, + [2][1][RTW89_WW][12] = 26, + [2][1][RTW89_WW][13] = 0, + [0][0][RTW89_FCC][0] = 70, + 
[0][0][RTW89_ETSI][0] = 32, + [0][0][RTW89_MKK][0] = 40, + [0][0][RTW89_IC][0] = 70, + [0][0][RTW89_KCC][0] = 46, + [0][0][RTW89_ACMA][0] = 32, + [0][0][RTW89_CHILE][0] = 60, + [0][0][RTW89_UKRAINE][0] = 32, + [0][0][RTW89_MEXICO][0] = 70, + [0][0][RTW89_CN][0] = 32, + [0][0][RTW89_QATAR][0] = 32, + [0][0][RTW89_FCC][1] = 70, + [0][0][RTW89_ETSI][1] = 32, + [0][0][RTW89_MKK][1] = 40, + [0][0][RTW89_IC][1] = 70, + [0][0][RTW89_KCC][1] = 46, + [0][0][RTW89_ACMA][1] = 32, + [0][0][RTW89_CHILE][1] = 60, + [0][0][RTW89_UKRAINE][1] = 32, + [0][0][RTW89_MEXICO][1] = 70, + [0][0][RTW89_CN][1] = 32, + [0][0][RTW89_QATAR][1] = 32, + [0][0][RTW89_FCC][2] = 74, + [0][0][RTW89_ETSI][2] = 32, + [0][0][RTW89_MKK][2] = 40, + [0][0][RTW89_IC][2] = 74, + [0][0][RTW89_KCC][2] = 46, + [0][0][RTW89_ACMA][2] = 32, + [0][0][RTW89_CHILE][2] = 60, + [0][0][RTW89_UKRAINE][2] = 32, + [0][0][RTW89_MEXICO][2] = 74, + [0][0][RTW89_CN][2] = 32, + [0][0][RTW89_QATAR][2] = 32, + [0][0][RTW89_FCC][3] = 78, + [0][0][RTW89_ETSI][3] = 32, + [0][0][RTW89_MKK][3] = 40, + [0][0][RTW89_IC][3] = 78, + [0][0][RTW89_KCC][3] = 46, + [0][0][RTW89_ACMA][3] = 32, + [0][0][RTW89_CHILE][3] = 60, + [0][0][RTW89_UKRAINE][3] = 32, + [0][0][RTW89_MEXICO][3] = 78, + [0][0][RTW89_CN][3] = 32, + [0][0][RTW89_QATAR][3] = 32, + [0][0][RTW89_FCC][4] = 78, + [0][0][RTW89_ETSI][4] = 32, + [0][0][RTW89_MKK][4] = 40, + [0][0][RTW89_IC][4] = 78, + [0][0][RTW89_KCC][4] = 46, + [0][0][RTW89_ACMA][4] = 32, + [0][0][RTW89_CHILE][4] = 60, + [0][0][RTW89_UKRAINE][4] = 32, + [0][0][RTW89_MEXICO][4] = 78, + [0][0][RTW89_CN][4] = 32, + [0][0][RTW89_QATAR][4] = 32, + [0][0][RTW89_FCC][5] = 78, + [0][0][RTW89_ETSI][5] = 32, + [0][0][RTW89_MKK][5] = 40, + [0][0][RTW89_IC][5] = 78, + [0][0][RTW89_KCC][5] = 46, + [0][0][RTW89_ACMA][5] = 32, + [0][0][RTW89_CHILE][5] = 60, + [0][0][RTW89_UKRAINE][5] = 32, + [0][0][RTW89_MEXICO][5] = 78, + [0][0][RTW89_CN][5] = 32, + [0][0][RTW89_QATAR][5] = 32, + [0][0][RTW89_FCC][6] = 78, + [0][0][RTW89_ETSI][6] = 32, + [0][0][RTW89_MKK][6] = 40, + [0][0][RTW89_IC][6] = 78, + [0][0][RTW89_KCC][6] = 46, + [0][0][RTW89_ACMA][6] = 32, + [0][0][RTW89_CHILE][6] = 60, + [0][0][RTW89_UKRAINE][6] = 32, + [0][0][RTW89_MEXICO][6] = 78, + [0][0][RTW89_CN][6] = 32, + [0][0][RTW89_QATAR][6] = 32, + [0][0][RTW89_FCC][7] = 78, + [0][0][RTW89_ETSI][7] = 32, + [0][0][RTW89_MKK][7] = 40, + [0][0][RTW89_IC][7] = 78, + [0][0][RTW89_KCC][7] = 46, + [0][0][RTW89_ACMA][7] = 32, + [0][0][RTW89_CHILE][7] = 60, + [0][0][RTW89_UKRAINE][7] = 32, + [0][0][RTW89_MEXICO][7] = 78, + [0][0][RTW89_CN][7] = 32, + [0][0][RTW89_QATAR][7] = 32, + [0][0][RTW89_FCC][8] = 74, + [0][0][RTW89_ETSI][8] = 32, + [0][0][RTW89_MKK][8] = 40, + [0][0][RTW89_IC][8] = 74, + [0][0][RTW89_KCC][8] = 46, + [0][0][RTW89_ACMA][8] = 32, + [0][0][RTW89_CHILE][8] = 60, + [0][0][RTW89_UKRAINE][8] = 32, + [0][0][RTW89_MEXICO][8] = 74, + [0][0][RTW89_CN][8] = 32, + [0][0][RTW89_QATAR][8] = 32, + [0][0][RTW89_FCC][9] = 70, + [0][0][RTW89_ETSI][9] = 32, + [0][0][RTW89_MKK][9] = 40, + [0][0][RTW89_IC][9] = 70, + [0][0][RTW89_KCC][9] = 46, + [0][0][RTW89_ACMA][9] = 32, + [0][0][RTW89_CHILE][9] = 60, + [0][0][RTW89_UKRAINE][9] = 32, + [0][0][RTW89_MEXICO][9] = 70, + [0][0][RTW89_CN][9] = 32, + [0][0][RTW89_QATAR][9] = 32, + [0][0][RTW89_FCC][10] = 70, + [0][0][RTW89_ETSI][10] = 32, + [0][0][RTW89_MKK][10] = 40, + [0][0][RTW89_IC][10] = 70, + [0][0][RTW89_KCC][10] = 46, + [0][0][RTW89_ACMA][10] = 32, + [0][0][RTW89_CHILE][10] = 60, + [0][0][RTW89_UKRAINE][10] = 32, + [0][0][RTW89_MEXICO][10] = 70, + 
[0][0][RTW89_CN][10] = 32, + [0][0][RTW89_QATAR][10] = 32, + [0][0][RTW89_FCC][11] = 58, + [0][0][RTW89_ETSI][11] = 32, + [0][0][RTW89_MKK][11] = 40, + [0][0][RTW89_IC][11] = 58, + [0][0][RTW89_KCC][11] = 46, + [0][0][RTW89_ACMA][11] = 32, + [0][0][RTW89_CHILE][11] = 58, + [0][0][RTW89_UKRAINE][11] = 32, + [0][0][RTW89_MEXICO][11] = 58, + [0][0][RTW89_CN][11] = 32, + [0][0][RTW89_QATAR][11] = 32, + [0][0][RTW89_FCC][12] = 34, + [0][0][RTW89_ETSI][12] = 32, + [0][0][RTW89_MKK][12] = 40, + [0][0][RTW89_IC][12] = 34, + [0][0][RTW89_KCC][12] = 46, + [0][0][RTW89_ACMA][12] = 32, + [0][0][RTW89_CHILE][12] = 34, + [0][0][RTW89_UKRAINE][12] = 32, + [0][0][RTW89_MEXICO][12] = 34, + [0][0][RTW89_CN][12] = 32, + [0][0][RTW89_QATAR][12] = 32, + [0][0][RTW89_FCC][13] = 127, + [0][0][RTW89_ETSI][13] = 127, + [0][0][RTW89_MKK][13] = 127, + [0][0][RTW89_IC][13] = 127, + [0][0][RTW89_KCC][13] = 127, + [0][0][RTW89_ACMA][13] = 127, + [0][0][RTW89_CHILE][13] = 127, + [0][0][RTW89_UKRAINE][13] = 127, + [0][0][RTW89_MEXICO][13] = 127, + [0][0][RTW89_CN][13] = 127, + [0][0][RTW89_QATAR][13] = 127, + [0][1][RTW89_FCC][0] = 64, + [0][1][RTW89_ETSI][0] = 20, + [0][1][RTW89_MKK][0] = 28, + [0][1][RTW89_IC][0] = 64, + [0][1][RTW89_KCC][0] = 32, + [0][1][RTW89_ACMA][0] = 20, + [0][1][RTW89_CHILE][0] = 48, + [0][1][RTW89_UKRAINE][0] = 20, + [0][1][RTW89_MEXICO][0] = 64, + [0][1][RTW89_CN][0] = 20, + [0][1][RTW89_QATAR][0] = 20, + [0][1][RTW89_FCC][1] = 64, + [0][1][RTW89_ETSI][1] = 20, + [0][1][RTW89_MKK][1] = 28, + [0][1][RTW89_IC][1] = 64, + [0][1][RTW89_KCC][1] = 32, + [0][1][RTW89_ACMA][1] = 20, + [0][1][RTW89_CHILE][1] = 48, + [0][1][RTW89_UKRAINE][1] = 20, + [0][1][RTW89_MEXICO][1] = 64, + [0][1][RTW89_CN][1] = 20, + [0][1][RTW89_QATAR][1] = 20, + [0][1][RTW89_FCC][2] = 68, + [0][1][RTW89_ETSI][2] = 20, + [0][1][RTW89_MKK][2] = 28, + [0][1][RTW89_IC][2] = 68, + [0][1][RTW89_KCC][2] = 32, + [0][1][RTW89_ACMA][2] = 20, + [0][1][RTW89_CHILE][2] = 48, + [0][1][RTW89_UKRAINE][2] = 20, + [0][1][RTW89_MEXICO][2] = 68, + [0][1][RTW89_CN][2] = 20, + [0][1][RTW89_QATAR][2] = 20, + [0][1][RTW89_FCC][3] = 72, + [0][1][RTW89_ETSI][3] = 20, + [0][1][RTW89_MKK][3] = 28, + [0][1][RTW89_IC][3] = 72, + [0][1][RTW89_KCC][3] = 32, + [0][1][RTW89_ACMA][3] = 20, + [0][1][RTW89_CHILE][3] = 48, + [0][1][RTW89_UKRAINE][3] = 20, + [0][1][RTW89_MEXICO][3] = 72, + [0][1][RTW89_CN][3] = 20, + [0][1][RTW89_QATAR][3] = 20, + [0][1][RTW89_FCC][4] = 76, + [0][1][RTW89_ETSI][4] = 20, + [0][1][RTW89_MKK][4] = 28, + [0][1][RTW89_IC][4] = 76, + [0][1][RTW89_KCC][4] = 32, + [0][1][RTW89_ACMA][4] = 20, + [0][1][RTW89_CHILE][4] = 48, + [0][1][RTW89_UKRAINE][4] = 20, + [0][1][RTW89_MEXICO][4] = 76, + [0][1][RTW89_CN][4] = 20, + [0][1][RTW89_QATAR][4] = 20, + [0][1][RTW89_FCC][5] = 78, + [0][1][RTW89_ETSI][5] = 20, + [0][1][RTW89_MKK][5] = 28, + [0][1][RTW89_IC][5] = 78, + [0][1][RTW89_KCC][5] = 32, + [0][1][RTW89_ACMA][5] = 20, + [0][1][RTW89_CHILE][5] = 48, + [0][1][RTW89_UKRAINE][5] = 20, + [0][1][RTW89_MEXICO][5] = 78, + [0][1][RTW89_CN][5] = 20, + [0][1][RTW89_QATAR][5] = 20, + [0][1][RTW89_FCC][6] = 76, + [0][1][RTW89_ETSI][6] = 20, + [0][1][RTW89_MKK][6] = 28, + [0][1][RTW89_IC][6] = 76, + [0][1][RTW89_KCC][6] = 32, + [0][1][RTW89_ACMA][6] = 20, + [0][1][RTW89_CHILE][6] = 48, + [0][1][RTW89_UKRAINE][6] = 20, + [0][1][RTW89_MEXICO][6] = 76, + [0][1][RTW89_CN][6] = 20, + [0][1][RTW89_QATAR][6] = 20, + [0][1][RTW89_FCC][7] = 72, + [0][1][RTW89_ETSI][7] = 20, + [0][1][RTW89_MKK][7] = 28, + [0][1][RTW89_IC][7] = 72, + [0][1][RTW89_KCC][7] = 32, + 
[0][1][RTW89_ACMA][7] = 20, + [0][1][RTW89_CHILE][7] = 48, + [0][1][RTW89_UKRAINE][7] = 20, + [0][1][RTW89_MEXICO][7] = 72, + [0][1][RTW89_CN][7] = 20, + [0][1][RTW89_QATAR][7] = 20, + [0][1][RTW89_FCC][8] = 68, + [0][1][RTW89_ETSI][8] = 20, + [0][1][RTW89_MKK][8] = 28, + [0][1][RTW89_IC][8] = 68, + [0][1][RTW89_KCC][8] = 32, + [0][1][RTW89_ACMA][8] = 20, + [0][1][RTW89_CHILE][8] = 48, + [0][1][RTW89_UKRAINE][8] = 20, + [0][1][RTW89_MEXICO][8] = 68, + [0][1][RTW89_CN][8] = 20, + [0][1][RTW89_QATAR][8] = 20, + [0][1][RTW89_FCC][9] = 64, + [0][1][RTW89_ETSI][9] = 20, + [0][1][RTW89_MKK][9] = 28, + [0][1][RTW89_IC][9] = 64, + [0][1][RTW89_KCC][9] = 32, + [0][1][RTW89_ACMA][9] = 20, + [0][1][RTW89_CHILE][9] = 48, + [0][1][RTW89_UKRAINE][9] = 20, + [0][1][RTW89_MEXICO][9] = 64, + [0][1][RTW89_CN][9] = 20, + [0][1][RTW89_QATAR][9] = 20, + [0][1][RTW89_FCC][10] = 64, + [0][1][RTW89_ETSI][10] = 20, + [0][1][RTW89_MKK][10] = 28, + [0][1][RTW89_IC][10] = 64, + [0][1][RTW89_KCC][10] = 32, + [0][1][RTW89_ACMA][10] = 20, + [0][1][RTW89_CHILE][10] = 48, + [0][1][RTW89_UKRAINE][10] = 20, + [0][1][RTW89_MEXICO][10] = 64, + [0][1][RTW89_CN][10] = 20, + [0][1][RTW89_QATAR][10] = 20, + [0][1][RTW89_FCC][11] = 54, + [0][1][RTW89_ETSI][11] = 20, + [0][1][RTW89_MKK][11] = 28, + [0][1][RTW89_IC][11] = 54, + [0][1][RTW89_KCC][11] = 32, + [0][1][RTW89_ACMA][11] = 20, + [0][1][RTW89_CHILE][11] = 48, + [0][1][RTW89_UKRAINE][11] = 20, + [0][1][RTW89_MEXICO][11] = 54, + [0][1][RTW89_CN][11] = 20, + [0][1][RTW89_QATAR][11] = 20, + [0][1][RTW89_FCC][12] = 32, + [0][1][RTW89_ETSI][12] = 20, + [0][1][RTW89_MKK][12] = 28, + [0][1][RTW89_IC][12] = 32, + [0][1][RTW89_KCC][12] = 32, + [0][1][RTW89_ACMA][12] = 20, + [0][1][RTW89_CHILE][12] = 32, + [0][1][RTW89_UKRAINE][12] = 20, + [0][1][RTW89_MEXICO][12] = 32, + [0][1][RTW89_CN][12] = 20, + [0][1][RTW89_QATAR][12] = 20, + [0][1][RTW89_FCC][13] = 127, + [0][1][RTW89_ETSI][13] = 127, + [0][1][RTW89_MKK][13] = 127, + [0][1][RTW89_IC][13] = 127, + [0][1][RTW89_KCC][13] = 127, + [0][1][RTW89_ACMA][13] = 127, + [0][1][RTW89_CHILE][13] = 127, + [0][1][RTW89_UKRAINE][13] = 127, + [0][1][RTW89_MEXICO][13] = 127, + [0][1][RTW89_CN][13] = 127, + [0][1][RTW89_QATAR][13] = 127, + [1][0][RTW89_FCC][0] = 72, + [1][0][RTW89_ETSI][0] = 42, + [1][0][RTW89_MKK][0] = 50, + [1][0][RTW89_IC][0] = 72, + [1][0][RTW89_KCC][0] = 58, + [1][0][RTW89_ACMA][0] = 42, + [1][0][RTW89_CHILE][0] = 60, + [1][0][RTW89_UKRAINE][0] = 42, + [1][0][RTW89_MEXICO][0] = 72, + [1][0][RTW89_CN][0] = 42, + [1][0][RTW89_QATAR][0] = 42, + [1][0][RTW89_FCC][1] = 72, + [1][0][RTW89_ETSI][1] = 42, + [1][0][RTW89_MKK][1] = 50, + [1][0][RTW89_IC][1] = 72, + [1][0][RTW89_KCC][1] = 58, + [1][0][RTW89_ACMA][1] = 42, + [1][0][RTW89_CHILE][1] = 60, + [1][0][RTW89_UKRAINE][1] = 42, + [1][0][RTW89_MEXICO][1] = 72, + [1][0][RTW89_CN][1] = 42, + [1][0][RTW89_QATAR][1] = 42, + [1][0][RTW89_FCC][2] = 76, + [1][0][RTW89_ETSI][2] = 42, + [1][0][RTW89_MKK][2] = 50, + [1][0][RTW89_IC][2] = 76, + [1][0][RTW89_KCC][2] = 58, + [1][0][RTW89_ACMA][2] = 42, + [1][0][RTW89_CHILE][2] = 60, + [1][0][RTW89_UKRAINE][2] = 42, + [1][0][RTW89_MEXICO][2] = 76, + [1][0][RTW89_CN][2] = 42, + [1][0][RTW89_QATAR][2] = 42, + [1][0][RTW89_FCC][3] = 78, + [1][0][RTW89_ETSI][3] = 42, + [1][0][RTW89_MKK][3] = 50, + [1][0][RTW89_IC][3] = 78, + [1][0][RTW89_KCC][3] = 58, + [1][0][RTW89_ACMA][3] = 42, + [1][0][RTW89_CHILE][3] = 60, + [1][0][RTW89_UKRAINE][3] = 42, + [1][0][RTW89_MEXICO][3] = 78, + [1][0][RTW89_CN][3] = 42, + [1][0][RTW89_QATAR][3] = 42, + 
[1][0][RTW89_FCC][4] = 78, + [1][0][RTW89_ETSI][4] = 42, + [1][0][RTW89_MKK][4] = 50, + [1][0][RTW89_IC][4] = 78, + [1][0][RTW89_KCC][4] = 58, + [1][0][RTW89_ACMA][4] = 42, + [1][0][RTW89_CHILE][4] = 60, + [1][0][RTW89_UKRAINE][4] = 42, + [1][0][RTW89_MEXICO][4] = 78, + [1][0][RTW89_CN][4] = 42, + [1][0][RTW89_QATAR][4] = 42, + [1][0][RTW89_FCC][5] = 78, + [1][0][RTW89_ETSI][5] = 42, + [1][0][RTW89_MKK][5] = 50, + [1][0][RTW89_IC][5] = 78, + [1][0][RTW89_KCC][5] = 58, + [1][0][RTW89_ACMA][5] = 42, + [1][0][RTW89_CHILE][5] = 60, + [1][0][RTW89_UKRAINE][5] = 42, + [1][0][RTW89_MEXICO][5] = 78, + [1][0][RTW89_CN][5] = 42, + [1][0][RTW89_QATAR][5] = 42, + [1][0][RTW89_FCC][6] = 78, + [1][0][RTW89_ETSI][6] = 42, + [1][0][RTW89_MKK][6] = 50, + [1][0][RTW89_IC][6] = 78, + [1][0][RTW89_KCC][6] = 58, + [1][0][RTW89_ACMA][6] = 42, + [1][0][RTW89_CHILE][6] = 60, + [1][0][RTW89_UKRAINE][6] = 42, + [1][0][RTW89_MEXICO][6] = 78, + [1][0][RTW89_CN][6] = 42, + [1][0][RTW89_QATAR][6] = 42, + [1][0][RTW89_FCC][7] = 78, + [1][0][RTW89_ETSI][7] = 42, + [1][0][RTW89_MKK][7] = 50, + [1][0][RTW89_IC][7] = 78, + [1][0][RTW89_KCC][7] = 58, + [1][0][RTW89_ACMA][7] = 42, + [1][0][RTW89_CHILE][7] = 60, + [1][0][RTW89_UKRAINE][7] = 42, + [1][0][RTW89_MEXICO][7] = 78, + [1][0][RTW89_CN][7] = 42, + [1][0][RTW89_QATAR][7] = 42, + [1][0][RTW89_FCC][8] = 78, + [1][0][RTW89_ETSI][8] = 42, + [1][0][RTW89_MKK][8] = 50, + [1][0][RTW89_IC][8] = 78, + [1][0][RTW89_KCC][8] = 58, + [1][0][RTW89_ACMA][8] = 42, + [1][0][RTW89_CHILE][8] = 60, + [1][0][RTW89_UKRAINE][8] = 42, + [1][0][RTW89_MEXICO][8] = 78, + [1][0][RTW89_CN][8] = 42, + [1][0][RTW89_QATAR][8] = 42, + [1][0][RTW89_FCC][9] = 74, + [1][0][RTW89_ETSI][9] = 42, + [1][0][RTW89_MKK][9] = 50, + [1][0][RTW89_IC][9] = 74, + [1][0][RTW89_KCC][9] = 58, + [1][0][RTW89_ACMA][9] = 42, + [1][0][RTW89_CHILE][9] = 60, + [1][0][RTW89_UKRAINE][9] = 42, + [1][0][RTW89_MEXICO][9] = 74, + [1][0][RTW89_CN][9] = 42, + [1][0][RTW89_QATAR][9] = 42, + [1][0][RTW89_FCC][10] = 74, + [1][0][RTW89_ETSI][10] = 42, + [1][0][RTW89_MKK][10] = 50, + [1][0][RTW89_IC][10] = 74, + [1][0][RTW89_KCC][10] = 58, + [1][0][RTW89_ACMA][10] = 42, + [1][0][RTW89_CHILE][10] = 60, + [1][0][RTW89_UKRAINE][10] = 42, + [1][0][RTW89_MEXICO][10] = 74, + [1][0][RTW89_CN][10] = 42, + [1][0][RTW89_QATAR][10] = 42, + [1][0][RTW89_FCC][11] = 64, + [1][0][RTW89_ETSI][11] = 42, + [1][0][RTW89_MKK][11] = 50, + [1][0][RTW89_IC][11] = 64, + [1][0][RTW89_KCC][11] = 58, + [1][0][RTW89_ACMA][11] = 42, + [1][0][RTW89_CHILE][11] = 60, + [1][0][RTW89_UKRAINE][11] = 42, + [1][0][RTW89_MEXICO][11] = 64, + [1][0][RTW89_CN][11] = 42, + [1][0][RTW89_QATAR][11] = 42, + [1][0][RTW89_FCC][12] = 36, + [1][0][RTW89_ETSI][12] = 42, + [1][0][RTW89_MKK][12] = 50, + [1][0][RTW89_IC][12] = 36, + [1][0][RTW89_KCC][12] = 58, + [1][0][RTW89_ACMA][12] = 42, + [1][0][RTW89_CHILE][12] = 36, + [1][0][RTW89_UKRAINE][12] = 42, + [1][0][RTW89_MEXICO][12] = 36, + [1][0][RTW89_CN][12] = 42, + [1][0][RTW89_QATAR][12] = 42, + [1][0][RTW89_FCC][13] = 127, + [1][0][RTW89_ETSI][13] = 127, + [1][0][RTW89_MKK][13] = 127, + [1][0][RTW89_IC][13] = 127, + [1][0][RTW89_KCC][13] = 127, + [1][0][RTW89_ACMA][13] = 127, + [1][0][RTW89_CHILE][13] = 127, + [1][0][RTW89_UKRAINE][13] = 127, + [1][0][RTW89_MEXICO][13] = 127, + [1][0][RTW89_CN][13] = 127, + [1][0][RTW89_QATAR][13] = 127, + [1][1][RTW89_FCC][0] = 66, + [1][1][RTW89_ETSI][0] = 30, + [1][1][RTW89_MKK][0] = 38, + [1][1][RTW89_IC][0] = 66, + [1][1][RTW89_KCC][0] = 44, + [1][1][RTW89_ACMA][0] = 30, + [1][1][RTW89_CHILE][0] = 
48, + [1][1][RTW89_UKRAINE][0] = 30, + [1][1][RTW89_MEXICO][0] = 66, + [1][1][RTW89_CN][0] = 30, + [1][1][RTW89_QATAR][0] = 30, + [1][1][RTW89_FCC][1] = 66, + [1][1][RTW89_ETSI][1] = 30, + [1][1][RTW89_MKK][1] = 38, + [1][1][RTW89_IC][1] = 66, + [1][1][RTW89_KCC][1] = 44, + [1][1][RTW89_ACMA][1] = 30, + [1][1][RTW89_CHILE][1] = 48, + [1][1][RTW89_UKRAINE][1] = 30, + [1][1][RTW89_MEXICO][1] = 66, + [1][1][RTW89_CN][1] = 30, + [1][1][RTW89_QATAR][1] = 30, + [1][1][RTW89_FCC][2] = 70, + [1][1][RTW89_ETSI][2] = 30, + [1][1][RTW89_MKK][2] = 38, + [1][1][RTW89_IC][2] = 70, + [1][1][RTW89_KCC][2] = 44, + [1][1][RTW89_ACMA][2] = 30, + [1][1][RTW89_CHILE][2] = 48, + [1][1][RTW89_UKRAINE][2] = 30, + [1][1][RTW89_MEXICO][2] = 70, + [1][1][RTW89_CN][2] = 30, + [1][1][RTW89_QATAR][2] = 30, + [1][1][RTW89_FCC][3] = 74, + [1][1][RTW89_ETSI][3] = 30, + [1][1][RTW89_MKK][3] = 38, + [1][1][RTW89_IC][3] = 74, + [1][1][RTW89_KCC][3] = 44, + [1][1][RTW89_ACMA][3] = 30, + [1][1][RTW89_CHILE][3] = 48, + [1][1][RTW89_UKRAINE][3] = 30, + [1][1][RTW89_MEXICO][3] = 74, + [1][1][RTW89_CN][3] = 30, + [1][1][RTW89_QATAR][3] = 30, + [1][1][RTW89_FCC][4] = 78, + [1][1][RTW89_ETSI][4] = 30, + [1][1][RTW89_MKK][4] = 38, + [1][1][RTW89_IC][4] = 78, + [1][1][RTW89_KCC][4] = 44, + [1][1][RTW89_ACMA][4] = 30, + [1][1][RTW89_CHILE][4] = 48, + [1][1][RTW89_UKRAINE][4] = 30, + [1][1][RTW89_MEXICO][4] = 78, + [1][1][RTW89_CN][4] = 30, + [1][1][RTW89_QATAR][4] = 30, + [1][1][RTW89_FCC][5] = 78, + [1][1][RTW89_ETSI][5] = 30, + [1][1][RTW89_MKK][5] = 38, + [1][1][RTW89_IC][5] = 78, + [1][1][RTW89_KCC][5] = 44, + [1][1][RTW89_ACMA][5] = 30, + [1][1][RTW89_CHILE][5] = 48, + [1][1][RTW89_UKRAINE][5] = 30, + [1][1][RTW89_MEXICO][5] = 78, + [1][1][RTW89_CN][5] = 30, + [1][1][RTW89_QATAR][5] = 30, + [1][1][RTW89_FCC][6] = 78, + [1][1][RTW89_ETSI][6] = 30, + [1][1][RTW89_MKK][6] = 38, + [1][1][RTW89_IC][6] = 78, + [1][1][RTW89_KCC][6] = 44, + [1][1][RTW89_ACMA][6] = 30, + [1][1][RTW89_CHILE][6] = 48, + [1][1][RTW89_UKRAINE][6] = 30, + [1][1][RTW89_MEXICO][6] = 78, + [1][1][RTW89_CN][6] = 30, + [1][1][RTW89_QATAR][6] = 30, + [1][1][RTW89_FCC][7] = 74, + [1][1][RTW89_ETSI][7] = 30, + [1][1][RTW89_MKK][7] = 38, + [1][1][RTW89_IC][7] = 74, + [1][1][RTW89_KCC][7] = 44, + [1][1][RTW89_ACMA][7] = 30, + [1][1][RTW89_CHILE][7] = 48, + [1][1][RTW89_UKRAINE][7] = 30, + [1][1][RTW89_MEXICO][7] = 74, + [1][1][RTW89_CN][7] = 30, + [1][1][RTW89_QATAR][7] = 30, + [1][1][RTW89_FCC][8] = 70, + [1][1][RTW89_ETSI][8] = 30, + [1][1][RTW89_MKK][8] = 38, + [1][1][RTW89_IC][8] = 70, + [1][1][RTW89_KCC][8] = 44, + [1][1][RTW89_ACMA][8] = 30, + [1][1][RTW89_CHILE][8] = 48, + [1][1][RTW89_UKRAINE][8] = 30, + [1][1][RTW89_MEXICO][8] = 70, + [1][1][RTW89_CN][8] = 30, + [1][1][RTW89_QATAR][8] = 30, + [1][1][RTW89_FCC][9] = 66, + [1][1][RTW89_ETSI][9] = 30, + [1][1][RTW89_MKK][9] = 38, + [1][1][RTW89_IC][9] = 66, + [1][1][RTW89_KCC][9] = 44, + [1][1][RTW89_ACMA][9] = 30, + [1][1][RTW89_CHILE][9] = 48, + [1][1][RTW89_UKRAINE][9] = 30, + [1][1][RTW89_MEXICO][9] = 66, + [1][1][RTW89_CN][9] = 30, + [1][1][RTW89_QATAR][9] = 30, + [1][1][RTW89_FCC][10] = 66, + [1][1][RTW89_ETSI][10] = 30, + [1][1][RTW89_MKK][10] = 38, + [1][1][RTW89_IC][10] = 66, + [1][1][RTW89_KCC][10] = 44, + [1][1][RTW89_ACMA][10] = 30, + [1][1][RTW89_CHILE][10] = 48, + [1][1][RTW89_UKRAINE][10] = 30, + [1][1][RTW89_MEXICO][10] = 66, + [1][1][RTW89_CN][10] = 30, + [1][1][RTW89_QATAR][10] = 30, + [1][1][RTW89_FCC][11] = 60, + [1][1][RTW89_ETSI][11] = 30, + [1][1][RTW89_MKK][11] = 38, + [1][1][RTW89_IC][11] = 
60, + [1][1][RTW89_KCC][11] = 44, + [1][1][RTW89_ACMA][11] = 30, + [1][1][RTW89_CHILE][11] = 48, + [1][1][RTW89_UKRAINE][11] = 30, + [1][1][RTW89_MEXICO][11] = 60, + [1][1][RTW89_CN][11] = 30, + [1][1][RTW89_QATAR][11] = 30, + [1][1][RTW89_FCC][12] = 32, + [1][1][RTW89_ETSI][12] = 30, + [1][1][RTW89_MKK][12] = 38, + [1][1][RTW89_IC][12] = 32, + [1][1][RTW89_KCC][12] = 44, + [1][1][RTW89_ACMA][12] = 30, + [1][1][RTW89_CHILE][12] = 32, + [1][1][RTW89_UKRAINE][12] = 30, + [1][1][RTW89_MEXICO][12] = 32, + [1][1][RTW89_CN][12] = 30, + [1][1][RTW89_QATAR][12] = 30, + [1][1][RTW89_FCC][13] = 127, + [1][1][RTW89_ETSI][13] = 127, + [1][1][RTW89_MKK][13] = 127, + [1][1][RTW89_IC][13] = 127, + [1][1][RTW89_KCC][13] = 127, + [1][1][RTW89_ACMA][13] = 127, + [1][1][RTW89_CHILE][13] = 127, + [1][1][RTW89_UKRAINE][13] = 127, + [1][1][RTW89_MEXICO][13] = 127, + [1][1][RTW89_CN][13] = 127, + [1][1][RTW89_QATAR][13] = 127, + [2][0][RTW89_FCC][0] = 76, + [2][0][RTW89_ETSI][0] = 52, + [2][0][RTW89_MKK][0] = 64, + [2][0][RTW89_IC][0] = 76, + [2][0][RTW89_KCC][0] = 70, + [2][0][RTW89_ACMA][0] = 52, + [2][0][RTW89_CHILE][0] = 60, + [2][0][RTW89_UKRAINE][0] = 52, + [2][0][RTW89_MEXICO][0] = 76, + [2][0][RTW89_CN][0] = 52, + [2][0][RTW89_QATAR][0] = 52, + [2][0][RTW89_FCC][1] = 76, + [2][0][RTW89_ETSI][1] = 52, + [2][0][RTW89_MKK][1] = 64, + [2][0][RTW89_IC][1] = 76, + [2][0][RTW89_KCC][1] = 70, + [2][0][RTW89_ACMA][1] = 52, + [2][0][RTW89_CHILE][1] = 60, + [2][0][RTW89_UKRAINE][1] = 52, + [2][0][RTW89_MEXICO][1] = 76, + [2][0][RTW89_CN][1] = 52, + [2][0][RTW89_QATAR][1] = 52, + [2][0][RTW89_FCC][2] = 78, + [2][0][RTW89_ETSI][2] = 52, + [2][0][RTW89_MKK][2] = 64, + [2][0][RTW89_IC][2] = 78, + [2][0][RTW89_KCC][2] = 70, + [2][0][RTW89_ACMA][2] = 52, + [2][0][RTW89_CHILE][2] = 60, + [2][0][RTW89_UKRAINE][2] = 52, + [2][0][RTW89_MEXICO][2] = 78, + [2][0][RTW89_CN][2] = 52, + [2][0][RTW89_QATAR][2] = 52, + [2][0][RTW89_FCC][3] = 78, + [2][0][RTW89_ETSI][3] = 52, + [2][0][RTW89_MKK][3] = 64, + [2][0][RTW89_IC][3] = 78, + [2][0][RTW89_KCC][3] = 70, + [2][0][RTW89_ACMA][3] = 52, + [2][0][RTW89_CHILE][3] = 60, + [2][0][RTW89_UKRAINE][3] = 52, + [2][0][RTW89_MEXICO][3] = 78, + [2][0][RTW89_CN][3] = 52, + [2][0][RTW89_QATAR][3] = 52, + [2][0][RTW89_FCC][4] = 78, + [2][0][RTW89_ETSI][4] = 52, + [2][0][RTW89_MKK][4] = 64, + [2][0][RTW89_IC][4] = 78, + [2][0][RTW89_KCC][4] = 70, + [2][0][RTW89_ACMA][4] = 52, + [2][0][RTW89_CHILE][4] = 60, + [2][0][RTW89_UKRAINE][4] = 52, + [2][0][RTW89_MEXICO][4] = 78, + [2][0][RTW89_CN][4] = 52, + [2][0][RTW89_QATAR][4] = 52, + [2][0][RTW89_FCC][5] = 78, + [2][0][RTW89_ETSI][5] = 52, + [2][0][RTW89_MKK][5] = 64, + [2][0][RTW89_IC][5] = 78, + [2][0][RTW89_KCC][5] = 70, + [2][0][RTW89_ACMA][5] = 52, + [2][0][RTW89_CHILE][5] = 60, + [2][0][RTW89_UKRAINE][5] = 52, + [2][0][RTW89_MEXICO][5] = 78, + [2][0][RTW89_CN][5] = 52, + [2][0][RTW89_QATAR][5] = 52, + [2][0][RTW89_FCC][6] = 78, + [2][0][RTW89_ETSI][6] = 52, + [2][0][RTW89_MKK][6] = 64, + [2][0][RTW89_IC][6] = 78, + [2][0][RTW89_KCC][6] = 70, + [2][0][RTW89_ACMA][6] = 52, + [2][0][RTW89_CHILE][6] = 60, + [2][0][RTW89_UKRAINE][6] = 52, + [2][0][RTW89_MEXICO][6] = 78, + [2][0][RTW89_CN][6] = 52, + [2][0][RTW89_QATAR][6] = 52, + [2][0][RTW89_FCC][7] = 78, + [2][0][RTW89_ETSI][7] = 52, + [2][0][RTW89_MKK][7] = 64, + [2][0][RTW89_IC][7] = 78, + [2][0][RTW89_KCC][7] = 70, + [2][0][RTW89_ACMA][7] = 52, + [2][0][RTW89_CHILE][7] = 60, + [2][0][RTW89_UKRAINE][7] = 52, + [2][0][RTW89_MEXICO][7] = 78, + [2][0][RTW89_CN][7] = 52, + [2][0][RTW89_QATAR][7] = 
52, + [2][0][RTW89_FCC][8] = 78, + [2][0][RTW89_ETSI][8] = 52, + [2][0][RTW89_MKK][8] = 64, + [2][0][RTW89_IC][8] = 78, + [2][0][RTW89_KCC][8] = 70, + [2][0][RTW89_ACMA][8] = 52, + [2][0][RTW89_CHILE][8] = 60, + [2][0][RTW89_UKRAINE][8] = 52, + [2][0][RTW89_MEXICO][8] = 78, + [2][0][RTW89_CN][8] = 52, + [2][0][RTW89_QATAR][8] = 52, + [2][0][RTW89_FCC][9] = 76, + [2][0][RTW89_ETSI][9] = 52, + [2][0][RTW89_MKK][9] = 64, + [2][0][RTW89_IC][9] = 76, + [2][0][RTW89_KCC][9] = 70, + [2][0][RTW89_ACMA][9] = 52, + [2][0][RTW89_CHILE][9] = 60, + [2][0][RTW89_UKRAINE][9] = 52, + [2][0][RTW89_MEXICO][9] = 76, + [2][0][RTW89_CN][9] = 52, + [2][0][RTW89_QATAR][9] = 52, + [2][0][RTW89_FCC][10] = 76, + [2][0][RTW89_ETSI][10] = 52, + [2][0][RTW89_MKK][10] = 64, + [2][0][RTW89_IC][10] = 76, + [2][0][RTW89_KCC][10] = 70, + [2][0][RTW89_ACMA][10] = 52, + [2][0][RTW89_CHILE][10] = 60, + [2][0][RTW89_UKRAINE][10] = 52, + [2][0][RTW89_MEXICO][10] = 76, + [2][0][RTW89_CN][10] = 52, + [2][0][RTW89_QATAR][10] = 52, + [2][0][RTW89_FCC][11] = 68, + [2][0][RTW89_ETSI][11] = 52, + [2][0][RTW89_MKK][11] = 64, + [2][0][RTW89_IC][11] = 68, + [2][0][RTW89_KCC][11] = 70, + [2][0][RTW89_ACMA][11] = 52, + [2][0][RTW89_CHILE][11] = 60, + [2][0][RTW89_UKRAINE][11] = 52, + [2][0][RTW89_MEXICO][11] = 68, + [2][0][RTW89_CN][11] = 52, + [2][0][RTW89_QATAR][11] = 52, + [2][0][RTW89_FCC][12] = 40, + [2][0][RTW89_ETSI][12] = 52, + [2][0][RTW89_MKK][12] = 64, + [2][0][RTW89_IC][12] = 40, + [2][0][RTW89_KCC][12] = 70, + [2][0][RTW89_ACMA][12] = 52, + [2][0][RTW89_CHILE][12] = 40, + [2][0][RTW89_UKRAINE][12] = 52, + [2][0][RTW89_MEXICO][12] = 40, + [2][0][RTW89_CN][12] = 52, + [2][0][RTW89_QATAR][12] = 52, + [2][0][RTW89_FCC][13] = 127, + [2][0][RTW89_ETSI][13] = 127, + [2][0][RTW89_MKK][13] = 127, + [2][0][RTW89_IC][13] = 127, + [2][0][RTW89_KCC][13] = 127, + [2][0][RTW89_ACMA][13] = 127, + [2][0][RTW89_CHILE][13] = 127, + [2][0][RTW89_UKRAINE][13] = 127, + [2][0][RTW89_MEXICO][13] = 127, + [2][0][RTW89_CN][13] = 127, + [2][0][RTW89_QATAR][13] = 127, + [2][1][RTW89_FCC][0] = 68, + [2][1][RTW89_ETSI][0] = 40, + [2][1][RTW89_MKK][0] = 52, + [2][1][RTW89_IC][0] = 68, + [2][1][RTW89_KCC][0] = 56, + [2][1][RTW89_ACMA][0] = 40, + [2][1][RTW89_CHILE][0] = 48, + [2][1][RTW89_UKRAINE][0] = 40, + [2][1][RTW89_MEXICO][0] = 68, + [2][1][RTW89_CN][0] = 40, + [2][1][RTW89_QATAR][0] = 40, + [2][1][RTW89_FCC][1] = 68, + [2][1][RTW89_ETSI][1] = 40, + [2][1][RTW89_MKK][1] = 52, + [2][1][RTW89_IC][1] = 68, + [2][1][RTW89_KCC][1] = 56, + [2][1][RTW89_ACMA][1] = 40, + [2][1][RTW89_CHILE][1] = 48, + [2][1][RTW89_UKRAINE][1] = 40, + [2][1][RTW89_MEXICO][1] = 68, + [2][1][RTW89_CN][1] = 40, + [2][1][RTW89_QATAR][1] = 40, + [2][1][RTW89_FCC][2] = 72, + [2][1][RTW89_ETSI][2] = 40, + [2][1][RTW89_MKK][2] = 52, + [2][1][RTW89_IC][2] = 72, + [2][1][RTW89_KCC][2] = 56, + [2][1][RTW89_ACMA][2] = 40, + [2][1][RTW89_CHILE][2] = 48, + [2][1][RTW89_UKRAINE][2] = 40, + [2][1][RTW89_MEXICO][2] = 72, + [2][1][RTW89_CN][2] = 40, + [2][1][RTW89_QATAR][2] = 40, + [2][1][RTW89_FCC][3] = 76, + [2][1][RTW89_ETSI][3] = 40, + [2][1][RTW89_MKK][3] = 52, + [2][1][RTW89_IC][3] = 76, + [2][1][RTW89_KCC][3] = 56, + [2][1][RTW89_ACMA][3] = 40, + [2][1][RTW89_CHILE][3] = 48, + [2][1][RTW89_UKRAINE][3] = 40, + [2][1][RTW89_MEXICO][3] = 76, + [2][1][RTW89_CN][3] = 40, + [2][1][RTW89_QATAR][3] = 40, + [2][1][RTW89_FCC][4] = 78, + [2][1][RTW89_ETSI][4] = 40, + [2][1][RTW89_MKK][4] = 52, + [2][1][RTW89_IC][4] = 78, + [2][1][RTW89_KCC][4] = 56, + [2][1][RTW89_ACMA][4] = 40, + 
[2][1][RTW89_CHILE][4] = 48, + [2][1][RTW89_UKRAINE][4] = 40, + [2][1][RTW89_MEXICO][4] = 78, + [2][1][RTW89_CN][4] = 40, + [2][1][RTW89_QATAR][4] = 40, + [2][1][RTW89_FCC][5] = 78, + [2][1][RTW89_ETSI][5] = 40, + [2][1][RTW89_MKK][5] = 52, + [2][1][RTW89_IC][5] = 78, + [2][1][RTW89_KCC][5] = 56, + [2][1][RTW89_ACMA][5] = 40, + [2][1][RTW89_CHILE][5] = 48, + [2][1][RTW89_UKRAINE][5] = 40, + [2][1][RTW89_MEXICO][5] = 78, + [2][1][RTW89_CN][5] = 40, + [2][1][RTW89_QATAR][5] = 40, + [2][1][RTW89_FCC][6] = 78, + [2][1][RTW89_ETSI][6] = 40, + [2][1][RTW89_MKK][6] = 52, + [2][1][RTW89_IC][6] = 78, + [2][1][RTW89_KCC][6] = 56, + [2][1][RTW89_ACMA][6] = 40, + [2][1][RTW89_CHILE][6] = 48, + [2][1][RTW89_UKRAINE][6] = 40, + [2][1][RTW89_MEXICO][6] = 78, + [2][1][RTW89_CN][6] = 40, + [2][1][RTW89_QATAR][6] = 40, + [2][1][RTW89_FCC][7] = 78, + [2][1][RTW89_ETSI][7] = 40, + [2][1][RTW89_MKK][7] = 52, + [2][1][RTW89_IC][7] = 78, + [2][1][RTW89_KCC][7] = 56, + [2][1][RTW89_ACMA][7] = 40, + [2][1][RTW89_CHILE][7] = 48, + [2][1][RTW89_UKRAINE][7] = 40, + [2][1][RTW89_MEXICO][7] = 78, + [2][1][RTW89_CN][7] = 40, + [2][1][RTW89_QATAR][7] = 40, + [2][1][RTW89_FCC][8] = 74, + [2][1][RTW89_ETSI][8] = 40, + [2][1][RTW89_MKK][8] = 52, + [2][1][RTW89_IC][8] = 74, + [2][1][RTW89_KCC][8] = 56, + [2][1][RTW89_ACMA][8] = 40, + [2][1][RTW89_CHILE][8] = 48, + [2][1][RTW89_UKRAINE][8] = 40, + [2][1][RTW89_MEXICO][8] = 74, + [2][1][RTW89_CN][8] = 40, + [2][1][RTW89_QATAR][8] = 40, + [2][1][RTW89_FCC][9] = 70, + [2][1][RTW89_ETSI][9] = 40, + [2][1][RTW89_MKK][9] = 52, + [2][1][RTW89_IC][9] = 70, + [2][1][RTW89_KCC][9] = 56, + [2][1][RTW89_ACMA][9] = 40, + [2][1][RTW89_CHILE][9] = 48, + [2][1][RTW89_UKRAINE][9] = 40, + [2][1][RTW89_MEXICO][9] = 70, + [2][1][RTW89_CN][9] = 40, + [2][1][RTW89_QATAR][9] = 40, + [2][1][RTW89_FCC][10] = 70, + [2][1][RTW89_ETSI][10] = 40, + [2][1][RTW89_MKK][10] = 52, + [2][1][RTW89_IC][10] = 70, + [2][1][RTW89_KCC][10] = 56, + [2][1][RTW89_ACMA][10] = 40, + [2][1][RTW89_CHILE][10] = 48, + [2][1][RTW89_UKRAINE][10] = 40, + [2][1][RTW89_MEXICO][10] = 70, + [2][1][RTW89_CN][10] = 40, + [2][1][RTW89_QATAR][10] = 40, + [2][1][RTW89_FCC][11] = 48, + [2][1][RTW89_ETSI][11] = 40, + [2][1][RTW89_MKK][11] = 52, + [2][1][RTW89_IC][11] = 48, + [2][1][RTW89_KCC][11] = 56, + [2][1][RTW89_ACMA][11] = 40, + [2][1][RTW89_CHILE][11] = 48, + [2][1][RTW89_UKRAINE][11] = 40, + [2][1][RTW89_MEXICO][11] = 48, + [2][1][RTW89_CN][11] = 40, + [2][1][RTW89_QATAR][11] = 40, + [2][1][RTW89_FCC][12] = 26, + [2][1][RTW89_ETSI][12] = 40, + [2][1][RTW89_MKK][12] = 52, + [2][1][RTW89_IC][12] = 26, + [2][1][RTW89_KCC][12] = 56, + [2][1][RTW89_ACMA][12] = 40, + [2][1][RTW89_CHILE][12] = 26, + [2][1][RTW89_UKRAINE][12] = 40, + [2][1][RTW89_MEXICO][12] = 26, + [2][1][RTW89_CN][12] = 40, + [2][1][RTW89_QATAR][12] = 40, + [2][1][RTW89_FCC][13] = 127, + [2][1][RTW89_ETSI][13] = 127, + [2][1][RTW89_MKK][13] = 127, + [2][1][RTW89_IC][13] = 127, + [2][1][RTW89_KCC][13] = 127, + [2][1][RTW89_ACMA][13] = 127, + [2][1][RTW89_CHILE][13] = 127, + [2][1][RTW89_UKRAINE][13] = 127, + [2][1][RTW89_MEXICO][13] = 127, + [2][1][RTW89_CN][13] = 127, + [2][1][RTW89_QATAR][13] = 127, }; const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = { - [0][0][0][0] = 22, - [0][0][0][2] = 22, - [0][0][0][4] = 22, - [0][0][0][6] = 22, - [0][0][0][8] = 24, - [0][0][0][10] = 24, - [0][0][0][12] = 24, - [0][0][0][14] = 24, - [0][0][0][15] = 24, - [0][0][0][17] = 24, - [0][0][0][19] = 24, - [0][0][0][21] = 24, - 
[0][0][0][23] = 24, - [0][0][0][25] = 24, - [0][0][0][27] = 24, - [0][0][0][29] = 24, - [0][0][0][31] = 24, - [0][0][0][33] = 24, - [0][0][0][35] = 24, - [0][0][0][37] = 24, - [0][0][0][38] = 28, - [0][0][0][40] = 28, - [0][0][0][42] = 28, - [0][0][0][44] = 28, - [0][0][0][46] = 28, - [0][1][0][0] = 8, - [0][1][0][2] = 8, - [0][1][0][4] = 8, - [0][1][0][6] = 8, - [0][1][0][8] = 12, - [0][1][0][10] = 12, - [0][1][0][12] = 12, - [0][1][0][14] = 12, - [0][1][0][15] = 12, - [0][1][0][17] = 12, - [0][1][0][19] = 12, - [0][1][0][21] = 12, - [0][1][0][23] = 12, - [0][1][0][25] = 12, - [0][1][0][27] = 12, - [0][1][0][29] = 12, - [0][1][0][31] = 12, - [0][1][0][33] = 12, - [0][1][0][35] = 12, - [0][1][0][37] = 12, - [0][1][0][38] = 16, - [0][1][0][40] = 16, - [0][1][0][42] = 16, - [0][1][0][44] = 16, - [0][1][0][46] = 16, - [1][0][0][0] = 30, - [1][0][0][2] = 30, - [1][0][0][4] = 30, - [1][0][0][6] = 30, - [1][0][0][8] = 36, - [1][0][0][10] = 36, - [1][0][0][12] = 36, - [1][0][0][14] = 36, - [1][0][0][15] = 36, - [1][0][0][17] = 36, - [1][0][0][19] = 36, - [1][0][0][21] = 36, - [1][0][0][23] = 36, - [1][0][0][25] = 36, - [1][0][0][27] = 36, - [1][0][0][29] = 36, - [1][0][0][31] = 36, - [1][0][0][33] = 36, - [1][0][0][35] = 36, - [1][0][0][37] = 36, - [1][0][0][38] = 28, - [1][0][0][40] = 28, - [1][0][0][42] = 28, - [1][0][0][44] = 28, - [1][0][0][46] = 28, - [1][1][0][0] = 18, - [1][1][0][2] = 18, - [1][1][0][4] = 18, - [1][1][0][6] = 18, - [1][1][0][8] = 22, - [1][1][0][10] = 22, - [1][1][0][12] = 22, - [1][1][0][14] = 22, - [1][1][0][15] = 22, - [1][1][0][17] = 22, - [1][1][0][19] = 22, - [1][1][0][21] = 22, - [1][1][0][23] = 22, - [1][1][0][25] = 22, - [1][1][0][27] = 22, - [1][1][0][29] = 22, - [1][1][0][31] = 22, - [1][1][0][33] = 22, - [1][1][0][35] = 22, - [1][1][0][37] = 22, - [1][1][0][38] = 16, - [1][1][0][40] = 16, - [1][1][0][42] = 16, - [1][1][0][44] = 16, - [1][1][0][46] = 16, - [2][0][0][0] = 30, - [2][0][0][2] = 30, - [2][0][0][4] = 30, - [2][0][0][6] = 30, - [2][0][0][8] = 46, - [2][0][0][10] = 46, - [2][0][0][12] = 46, - [2][0][0][14] = 46, - [2][0][0][15] = 46, - [2][0][0][17] = 46, - [2][0][0][19] = 46, - [2][0][0][21] = 46, - [2][0][0][23] = 46, - [2][0][0][25] = 46, - [2][0][0][27] = 46, - [2][0][0][29] = 46, - [2][0][0][31] = 46, - [2][0][0][33] = 46, - [2][0][0][35] = 46, - [2][0][0][37] = 46, - [2][0][0][38] = 28, - [2][0][0][40] = 28, - [2][0][0][42] = 28, - [2][0][0][44] = 28, - [2][0][0][46] = 28, - [2][1][0][0] = 18, - [2][1][0][2] = 18, - [2][1][0][4] = 18, - [2][1][0][6] = 18, - [2][1][0][8] = 32, - [2][1][0][10] = 32, - [2][1][0][12] = 32, - [2][1][0][14] = 32, - [2][1][0][15] = 32, - [2][1][0][17] = 32, - [2][1][0][19] = 32, - [2][1][0][21] = 32, - [2][1][0][23] = 32, - [2][1][0][25] = 32, - [2][1][0][27] = 32, - [2][1][0][29] = 32, - [2][1][0][31] = 32, - [2][1][0][33] = 32, - [2][1][0][35] = 32, - [2][1][0][37] = 32, - [2][1][0][38] = 16, - [2][1][0][40] = 16, - [2][1][0][42] = 16, - [2][1][0][44] = 16, - [2][1][0][46] = 16, - [0][0][2][0] = 48, - [0][0][1][0] = 24, - [0][0][3][0] = 26, - [0][0][5][0] = 22, - [0][0][6][0] = 24, - [0][0][9][0] = 24, - [0][0][8][0] = 30, - [0][0][11][0] = 24, - [0][0][2][2] = 48, - [0][0][1][2] = 24, - [0][0][3][2] = 26, - [0][0][5][2] = 22, - [0][0][6][2] = 24, - [0][0][9][2] = 24, - [0][0][8][2] = 30, - [0][0][11][2] = 24, - [0][0][2][4] = 48, - [0][0][1][4] = 24, - [0][0][3][4] = 26, - [0][0][5][4] = 22, - [0][0][6][4] = 24, - [0][0][9][4] = 24, - [0][0][8][4] = 30, - [0][0][11][4] = 24, - [0][0][2][6] = 48, - [0][0][1][6] = 24, 
- [0][0][3][6] = 26, - [0][0][5][6] = 22, - [0][0][6][6] = 24, - [0][0][9][6] = 24, - [0][0][8][6] = 30, - [0][0][11][6] = 24, - [0][0][2][8] = 48, - [0][0][1][8] = 24, - [0][0][3][8] = 26, - [0][0][5][8] = 48, - [0][0][6][8] = 24, - [0][0][9][8] = 24, - [0][0][8][8] = 54, - [0][0][11][8] = 24, - [0][0][2][10] = 48, - [0][0][1][10] = 24, - [0][0][3][10] = 26, - [0][0][5][10] = 48, - [0][0][6][10] = 24, - [0][0][9][10] = 24, - [0][0][8][10] = 54, - [0][0][11][10] = 24, - [0][0][2][12] = 48, - [0][0][1][12] = 24, - [0][0][3][12] = 26, - [0][0][5][12] = 48, - [0][0][6][12] = 24, - [0][0][9][12] = 24, - [0][0][8][12] = 54, - [0][0][11][12] = 24, - [0][0][2][14] = 48, - [0][0][1][14] = 24, - [0][0][3][14] = 26, - [0][0][5][14] = 48, - [0][0][6][14] = 24, - [0][0][9][14] = 24, - [0][0][8][14] = 54, - [0][0][11][14] = 24, - [0][0][2][15] = 48, - [0][0][1][15] = 24, - [0][0][3][15] = 44, - [0][0][5][15] = 48, - [0][0][6][15] = 24, - [0][0][9][15] = 24, - [0][0][8][15] = 54, - [0][0][11][15] = 24, - [0][0][2][17] = 48, - [0][0][1][17] = 24, - [0][0][3][17] = 44, - [0][0][5][17] = 48, - [0][0][6][17] = 24, - [0][0][9][17] = 24, - [0][0][8][17] = 54, - [0][0][11][17] = 24, - [0][0][2][19] = 48, - [0][0][1][19] = 24, - [0][0][3][19] = 44, - [0][0][5][19] = 48, - [0][0][6][19] = 24, - [0][0][9][19] = 24, - [0][0][8][19] = 54, - [0][0][11][19] = 24, - [0][0][2][21] = 48, - [0][0][1][21] = 24, - [0][0][3][21] = 44, - [0][0][5][21] = 48, - [0][0][6][21] = 24, - [0][0][9][21] = 24, - [0][0][8][21] = 54, - [0][0][11][21] = 24, - [0][0][2][23] = 48, - [0][0][1][23] = 24, - [0][0][3][23] = 44, - [0][0][5][23] = 48, - [0][0][6][23] = 24, - [0][0][9][23] = 24, - [0][0][8][23] = 54, - [0][0][11][23] = 24, - [0][0][2][25] = 48, - [0][0][1][25] = 24, - [0][0][3][25] = 44, - [0][0][5][25] = 127, - [0][0][6][25] = 24, - [0][0][9][25] = 127, - [0][0][8][25] = 54, - [0][0][11][25] = 24, - [0][0][2][27] = 48, - [0][0][1][27] = 24, - [0][0][3][27] = 44, - [0][0][5][27] = 127, - [0][0][6][27] = 24, - [0][0][9][27] = 127, - [0][0][8][27] = 54, - [0][0][11][27] = 24, - [0][0][2][29] = 48, - [0][0][1][29] = 24, - [0][0][3][29] = 44, - [0][0][5][29] = 127, - [0][0][6][29] = 24, - [0][0][9][29] = 127, - [0][0][8][29] = 54, - [0][0][11][29] = 24, - [0][0][2][31] = 48, - [0][0][1][31] = 24, - [0][0][3][31] = 44, - [0][0][5][31] = 48, - [0][0][6][31] = 24, - [0][0][9][31] = 24, - [0][0][8][31] = 54, - [0][0][11][31] = 24, - [0][0][2][33] = 48, - [0][0][1][33] = 24, - [0][0][3][33] = 44, - [0][0][5][33] = 48, - [0][0][6][33] = 24, - [0][0][9][33] = 24, - [0][0][8][33] = 54, - [0][0][11][33] = 24, - [0][0][2][35] = 48, - [0][0][1][35] = 24, - [0][0][3][35] = 44, - [0][0][5][35] = 48, - [0][0][6][35] = 24, - [0][0][9][35] = 24, - [0][0][8][35] = 54, - [0][0][11][35] = 24, - [0][0][2][37] = 48, - [0][0][1][37] = 127, - [0][0][3][37] = 44, - [0][0][5][37] = 48, - [0][0][6][37] = 24, - [0][0][9][37] = 48, - [0][0][8][37] = 54, - [0][0][11][37] = 127, - [0][0][2][38] = 76, - [0][0][1][38] = 28, - [0][0][3][38] = 127, - [0][0][5][38] = 76, - [0][0][6][38] = 28, - [0][0][9][38] = 76, - [0][0][8][38] = 54, - [0][0][11][38] = 28, - [0][0][2][40] = 76, - [0][0][1][40] = 28, - [0][0][3][40] = 127, - [0][0][5][40] = 76, - [0][0][6][40] = 28, - [0][0][9][40] = 76, - [0][0][8][40] = 54, - [0][0][11][40] = 28, - [0][0][2][42] = 76, - [0][0][1][42] = 28, - [0][0][3][42] = 127, - [0][0][5][42] = 76, - [0][0][6][42] = 28, - [0][0][9][42] = 76, - [0][0][8][42] = 54, - [0][0][11][42] = 28, - [0][0][2][44] = 76, - [0][0][1][44] = 28, - [0][0][3][44] = 
127,
-	[0][0][5][44] = 76,
	[~1,000 further "-" entries follow: the remainder of the old rtw89 TX power limit table, indexed as [bandwidth][RF path][regulatory domain as a bare numeric index][channel]]
+	[0][0][RTW89_WW][0] = 22,
	[~1,700 "+" entries follow: the rebuilt table, now keyed by named regulatory-domain constants. A worldwide row, [bandwidth][RF path][RTW89_WW][channel], comes first; per-domain rows then repeat the same grid for RTW89_FCC, RTW89_ETSI, RTW89_MKK, RTW89_IC, RTW89_KCC, RTW89_ACMA, RTW89_CHILE, RTW89_UKRAINE, RTW89_MEXICO, RTW89_CN and RTW89_QATAR]
+	[2][1][RTW89_FCC][31] = 58,
+	[2][1][RTW89_ETSI][31] = 32,
+
[2][1][RTW89_MKK][31] = 58, + [2][1][RTW89_IC][31] = 58, + [2][1][RTW89_KCC][31] = 54, + [2][1][RTW89_ACMA][31] = 32, + [2][1][RTW89_CHILE][31] = 42, + [2][1][RTW89_UKRAINE][31] = 32, + [2][1][RTW89_MEXICO][31] = 58, + [2][1][RTW89_CN][31] = 127, + [2][1][RTW89_QATAR][31] = 32, + [2][1][RTW89_FCC][33] = 58, + [2][1][RTW89_ETSI][33] = 32, + [2][1][RTW89_MKK][33] = 58, + [2][1][RTW89_IC][33] = 58, + [2][1][RTW89_KCC][33] = 54, + [2][1][RTW89_ACMA][33] = 32, + [2][1][RTW89_CHILE][33] = 42, + [2][1][RTW89_UKRAINE][33] = 32, + [2][1][RTW89_MEXICO][33] = 58, + [2][1][RTW89_CN][33] = 127, + [2][1][RTW89_QATAR][33] = 32, + [2][1][RTW89_FCC][35] = 58, + [2][1][RTW89_ETSI][35] = 32, + [2][1][RTW89_MKK][35] = 58, + [2][1][RTW89_IC][35] = 58, + [2][1][RTW89_KCC][35] = 54, + [2][1][RTW89_ACMA][35] = 32, + [2][1][RTW89_CHILE][35] = 42, + [2][1][RTW89_UKRAINE][35] = 32, + [2][1][RTW89_MEXICO][35] = 58, + [2][1][RTW89_CN][35] = 127, + [2][1][RTW89_QATAR][35] = 32, + [2][1][RTW89_FCC][37] = 58, + [2][1][RTW89_ETSI][37] = 127, + [2][1][RTW89_MKK][37] = 58, + [2][1][RTW89_IC][37] = 58, + [2][1][RTW89_KCC][37] = 54, + [2][1][RTW89_ACMA][37] = 62, + [2][1][RTW89_CHILE][37] = 42, + [2][1][RTW89_UKRAINE][37] = 127, + [2][1][RTW89_MEXICO][37] = 58, + [2][1][RTW89_CN][37] = 127, + [2][1][RTW89_QATAR][37] = 127, + [2][1][RTW89_FCC][38] = 76, + [2][1][RTW89_ETSI][38] = 16, + [2][1][RTW89_MKK][38] = 127, + [2][1][RTW89_IC][38] = 76, + [2][1][RTW89_KCC][38] = 54, + [2][1][RTW89_ACMA][38] = 76, + [2][1][RTW89_CHILE][38] = 42, + [2][1][RTW89_UKRAINE][38] = 16, + [2][1][RTW89_MEXICO][38] = 76, + [2][1][RTW89_CN][38] = 64, + [2][1][RTW89_QATAR][38] = 16, + [2][1][RTW89_FCC][40] = 76, + [2][1][RTW89_ETSI][40] = 16, + [2][1][RTW89_MKK][40] = 127, + [2][1][RTW89_IC][40] = 76, + [2][1][RTW89_KCC][40] = 54, + [2][1][RTW89_ACMA][40] = 76, + [2][1][RTW89_CHILE][40] = 42, + [2][1][RTW89_UKRAINE][40] = 16, + [2][1][RTW89_MEXICO][40] = 76, + [2][1][RTW89_CN][40] = 64, + [2][1][RTW89_QATAR][40] = 16, + [2][1][RTW89_FCC][42] = 76, + [2][1][RTW89_ETSI][42] = 16, + [2][1][RTW89_MKK][42] = 127, + [2][1][RTW89_IC][42] = 76, + [2][1][RTW89_KCC][42] = 54, + [2][1][RTW89_ACMA][42] = 76, + [2][1][RTW89_CHILE][42] = 42, + [2][1][RTW89_UKRAINE][42] = 16, + [2][1][RTW89_MEXICO][42] = 76, + [2][1][RTW89_CN][42] = 64, + [2][1][RTW89_QATAR][42] = 16, + [2][1][RTW89_FCC][44] = 76, + [2][1][RTW89_ETSI][44] = 16, + [2][1][RTW89_MKK][44] = 127, + [2][1][RTW89_IC][44] = 76, + [2][1][RTW89_KCC][44] = 54, + [2][1][RTW89_ACMA][44] = 76, + [2][1][RTW89_CHILE][44] = 42, + [2][1][RTW89_UKRAINE][44] = 16, + [2][1][RTW89_MEXICO][44] = 76, + [2][1][RTW89_CN][44] = 64, + [2][1][RTW89_QATAR][44] = 16, + [2][1][RTW89_FCC][46] = 76, + [2][1][RTW89_ETSI][46] = 16, + [2][1][RTW89_MKK][46] = 127, + [2][1][RTW89_IC][46] = 76, + [2][1][RTW89_KCC][46] = 54, + [2][1][RTW89_ACMA][46] = 76, + [2][1][RTW89_CHILE][46] = 42, + [2][1][RTW89_UKRAINE][46] = 16, + [2][1][RTW89_MEXICO][46] = 76, + [2][1][RTW89_CN][46] = 64, + [2][1][RTW89_QATAR][46] = 16, }; #define DECLARE_DIG_TABLE(name) \ diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index f1e0fe36107d..75b11249f306 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -140,52 +140,56 @@ le32_get_bits((rxdesc)->dword5, GENMASK(7, 0)) #define RTW89_GET_RXINFO_USR_NUM(rpt) \ - le32_get_bits(*((__le32 *)rpt), GENMASK(3, 0)) + le32_get_bits(*((const __le32 *)rpt), GENMASK(3, 0)) #define RTW89_GET_RXINFO_FW_DEFINE(rpt) \ - 
le32_get_bits(*((__le32 *)rpt), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)rpt), GENMASK(15, 8)) #define RTW89_GET_RXINFO_LSIG_LEN(rpt) \ - le32_get_bits(*((__le32 *)rpt), GENMASK(27, 16)) + le32_get_bits(*((const __le32 *)rpt), GENMASK(27, 16)) #define RTW89_GET_RXINFO_IS_TO_SELF(rpt) \ - le32_get_bits(*((__le32 *)rpt), BIT(28)) + le32_get_bits(*((const __le32 *)rpt), BIT(28)) #define RTW89_GET_RXINFO_RX_CNT_VLD(rpt) \ - le32_get_bits(*((__le32 *)rpt), BIT(29)) + le32_get_bits(*((const __le32 *)rpt), BIT(29)) #define RTW89_GET_RXINFO_LONG_RXD(rpt) \ - le32_get_bits(*((__le32 *)rpt), GENMASK(31, 30)) + le32_get_bits(*((const __le32 *)rpt), GENMASK(31, 30)) #define RTW89_GET_RXINFO_SERVICE(rpt) \ - le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(15, 0)) + le32_get_bits(*((const __le32 *)(rpt) + 1), GENMASK(15, 0)) #define RTW89_GET_RXINFO_PLCP_LEN(rpt) \ - le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(23, 16)) + le32_get_bits(*((const __le32 *)(rpt) + 1), GENMASK(23, 16)) #define RTW89_GET_RXINFO_MAC_ID_VALID(rpt, usr) \ - le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(0)) + le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(0)) #define RTW89_GET_RXINFO_DATA(rpt, usr) \ - le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(1)) + le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(1)) #define RTW89_GET_RXINFO_CTRL(rpt, usr) \ - le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(2)) + le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(2)) #define RTW89_GET_RXINFO_MGMT(rpt, usr) \ - le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(3)) + le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(3)) #define RTW89_GET_RXINFO_BCM(rpt, usr) \ - le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(4)) + le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), BIT(4)) #define RTW89_GET_RXINFO_MACID(rpt, usr) \ - le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)(rpt) + (usr) + 2), GENMASK(15, 8)) +#define RTW89_GET_PHY_STS_IE_MAP(sts) \ + le32_get_bits(*((const __le32 *)(sts)), GENMASK(4, 0)) #define RTW89_GET_PHY_STS_RSSI_A(sts) \ - le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(7, 0)) + le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(7, 0)) #define RTW89_GET_PHY_STS_RSSI_B(sts) \ - le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(15, 8)) #define RTW89_GET_PHY_STS_RSSI_C(sts) \ - le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(23, 16)) + le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(23, 16)) #define RTW89_GET_PHY_STS_RSSI_D(sts) \ - le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(31, 24)) + le32_get_bits(*((const __le32 *)(sts) + 1), GENMASK(31, 24)) #define RTW89_GET_PHY_STS_LEN(sts) \ - le32_get_bits(*((__le32 *)sts), GENMASK(15, 8)) + le32_get_bits(*((const __le32 *)sts), GENMASK(15, 8)) #define RTW89_GET_PHY_STS_RSSI_AVG(sts) \ - le32_get_bits(*((__le32 *)sts), GENMASK(31, 24)) + le32_get_bits(*((const __le32 *)sts), GENMASK(31, 24)) #define RTW89_GET_PHY_STS_IE_TYPE(ie) \ - le32_get_bits(*((__le32 *)ie), GENMASK(4, 0)) + le32_get_bits(*((const __le32 *)ie), GENMASK(4, 0)) #define RTW89_GET_PHY_STS_IE_LEN(ie) \ - le32_get_bits(*((__le32 *)ie), GENMASK(11, 5)) -#define RTW89_GET_PHY_STS_IE0_CFO(ie) \ - le32_get_bits(*((__le32 *)(ie) + 1), GENMASK(31, 20)) + le32_get_bits(*((const __le32 *)ie), GENMASK(11, 5)) +#define RTW89_GET_PHY_STS_IE01_CH_IDX(ie) \ + le32_get_bits(*((const __le32 *)ie), GENMASK(23, 16)) +#define RTW89_GET_PHY_STS_IE01_CFO(ie) \ + le32_get_bits(*((const 
__le32 *)(ie) + 1), GENMASK(31, 20)) enum rtw89_tx_channel { RTW89_TXCH_ACH0 = 0, @@ -251,45 +255,6 @@ enum rtw89_tx_qsel { /* reserved */ }; -enum rtw89_phy_status_ie_type { - RTW89_PHYSTS_IE00_CMN_CCK = 0, - RTW89_PHYSTS_IE01_CMN_OFDM = 1, - RTW89_PHYSTS_IE02_CMN_EXT_AX = 2, - RTW89_PHYSTS_IE03_CMN_EXT_SEG_1 = 3, - RTW89_PHYSTS_IE04_CMN_EXT_PATH_A = 4, - RTW89_PHYSTS_IE05_CMN_EXT_PATH_B = 5, - RTW89_PHYSTS_IE06_CMN_EXT_PATH_C = 6, - RTW89_PHYSTS_IE07_CMN_EXT_PATH_D = 7, - RTW89_PHYSTS_IE08_FTR_CH = 8, - RTW89_PHYSTS_IE09_FTR_PLCP_0 = 9, - RTW89_PHYSTS_IE10_FTR_PLCP_EXT = 10, - RTW89_PHYSTS_IE11_FTR_PLCP_HISTOGRAM = 11, - RTW89_PHYSTS_IE12_MU_EIGEN_INFO = 12, - RTW89_PHYSTS_IE13_DL_MU_DEF = 13, - RTW89_PHYSTS_IE14_TB_UL_CQI = 14, - RTW89_PHYSTS_IE15_TB_UL_DEF = 15, - RTW89_PHYSTS_IE16_RSVD16 = 16, - RTW89_PHYSTS_IE17_TB_UL_CTRL = 17, - RTW89_PHYSTS_IE18_DBG_OFDM_FD_CMN = 18, - RTW89_PHYSTS_IE19_DBG_OFDM_TD_CMN = 19, - RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0 = 20, - RTW89_PHYSTS_IE21_DBG_OFDM_FD_USER_SEG_1 = 21, - RTW89_PHYSTS_IE22_DBG_OFDM_FD_USER_AGC = 22, - RTW89_PHYSTS_IE23_RSVD23 = 23, - RTW89_PHYSTS_IE24_DBG_OFDM_TD_PATH_A = 24, - RTW89_PHYSTS_IE25_DBG_OFDM_TD_PATH_B = 25, - RTW89_PHYSTS_IE26_DBG_OFDM_TD_PATH_C = 26, - RTW89_PHYSTS_IE27_DBG_OFDM_TD_PATH_D = 27, - RTW89_PHYSTS_IE28_DBG_CCK_PATH_A = 28, - RTW89_PHYSTS_IE29_DBG_CCK_PATH_B = 29, - RTW89_PHYSTS_IE30_DBG_CCK_PATH_C = 30, - RTW89_PHYSTS_IE31_DBG_CCK_PATH_D = 31, - - /* keep last */ - RTW89_PHYSTS_IE_NUM, - RTW89_PHYSTS_IE_MAX = RTW89_PHYSTS_IE_NUM - 1 -}; - static inline u8 rtw89_core_get_qsel(struct rtw89_dev *rtwdev, u8 tid) { switch (tid) { diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index e70c1c7fdf59..913e11fb3807 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1108,6 +1108,9 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, break; } + if (ii >= RSI_MAX_VIFS) + return status; + mutex_lock(&common->mutex); if (ssn != NULL) diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f1bf71e6c608..5d1490fc32db 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -23,6 +23,7 @@ #include "rsi_common.h" #include "rsi_coex.h" #include "rsi_hal.h" +#include "rsi_usb.h" u32 rsi_zone_enabled = /* INFO_ZONE | INIT_ZONE | @@ -168,6 +169,9 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len) frame_desc = &rx_pkt[index]; actual_length = *(u16 *)&frame_desc[0]; offset = *(u16 *)&frame_desc[2]; + if (!rcv_pkt_len && offset > + RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ) + goto fail; queueno = rsi_get_queueno(frame_desc, offset); length = rsi_get_length(frame_desc, offset); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 6821ea991895..66fe386ec9cc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -269,8 +269,12 @@ static void rsi_rx_done_handler(struct urb *urb) struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data; int status = -EINVAL; + if (!rx_cb->rx_skb) + return; + if (urb->status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; return; } @@ -294,8 +298,10 @@ out: if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC)) rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__); - if (status) + if (status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; + 
} } static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) @@ -324,7 +330,6 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) struct sk_buff *skb; u8 dword_align_bytes = 0; -#define RSI_MAX_RX_USB_PKT_SIZE 3000 skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE); if (!skb) return -ENOMEM; diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 254d19b66412..961851748bc4 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -44,6 +44,8 @@ #define RSI_USB_BUF_SIZE 4096 #define RSI_USB_CTRL_BUF_SIZE 0x04 +#define RSI_MAX_RX_USB_PKT_SIZE 3000 + struct rx_usb_ctrl_block { u8 *data; struct urb *rx_urb; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 136a0d3b23c9..a25a6143e65f 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1520,6 +1520,12 @@ int wl1251_init_ieee80211(struct wl1251 *wl) wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); wl->hw->wiphy->max_scan_ssids = 1; + + /* We set max_scan_ie_len to a random value to make wpa_supplicant scans not + * fail, as the driver will ignore the extra passed IEs anyway + */ + wl->hw->wiphy->max_scan_ie_len = 512; + wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz; wl->hw->queues = 4; diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 9fd8cf2d270c..72fc41ac83c0 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -26,7 +26,7 @@ #include "wl12xx_80211.h" #include "io.h" -static bool dump = false; +static bool dump; struct wl12xx_sdio_glue { struct device *dev; diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index 17543be14665..609fd4a2c865 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -16,6 +16,17 @@ config WWAN if WWAN +config WWAN_DEBUGFS + bool "WWAN devices debugfs interface" if EXPERT + depends on DEBUG_FS + default y + help + Enables debugfs infrastructure for the WWAN core and device drivers. + + If this option is selected, then you can find the debug interface + elements for each WWAN device in a directory corresponding to + the device name: debugfs/wwan/wwanX. + config WWAN_HWSIM tristate "Simulated WWAN device" help @@ -50,6 +61,19 @@ config MHI_WWAN_MBIM To compile this driver as a module, choose M here: the module will be called mhi_wwan_mbim. +config QCOM_BAM_DMUX + tristate "Qualcomm BAM-DMUX WWAN network driver" + depends on (DMA_ENGINE && PM && QCOM_SMEM_STATE) || COMPILE_TEST + help + The BAM Data Multiplexer provides access to the network data channels + of modems integrated into many older Qualcomm SoCs, e.g. Qualcomm + MSM8916 or MSM8974. The connection can be established via QMI/AT from + userspace with control ports available through the WWAN subsystem + (CONFIG_RPMSG_WWAN_CTRL) or QRTR network sockets (CONFIG_QRTR). + + To compile this driver as a module, choose M here: the module will be + called qcom_bam_dmux. + config RPMSG_WWAN_CTRL tristate "RPMSG WWAN control driver" depends on RPMSG @@ -72,6 +96,7 @@ config IOSM tristate "IOSM Driver for Intel M.2 WWAN Device" depends on INTEL_IOMMU select NET_DEVLINK + select RELAY if WWAN_DEBUGFS help This driver enables Intel M.2 WWAN Device communication.
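A brief aside on how drivers are expected to consume CONFIG_WWAN_DEBUGFS: the option above only builds the core infrastructure, and each driver still creates its own entries under the per-device debugfs/wwan/wwanX directory. The following is a minimal sketch, not part of this patch set (struct foo_dev, foo_debugfs_init() and the rx_drops counter are hypothetical names); the only WWAN-specific call it assumes is wwan_get_debugfs_dir(), the same parent-directory accessor the IOSM debugfs code further down uses.

#include <linux/debugfs.h>
#include <linux/wwan.h>

/* Hypothetical driver state; only the debugfs handles matter here. */
struct foo_dev {
	struct device *dev;         /* device registered with the WWAN core */
	struct dentry *debugfs_dir; /* our subdirectory under wwan/wwanX */
	u32 rx_drops;               /* example counter exposed read-only */
};

static void foo_debugfs_init(struct foo_dev *foo)
{
	/* Parent is the debugfs/wwan/wwanX directory owned by the WWAN
	 * core; the debugfs_create_*() helpers tolerate an error-pointer
	 * parent, so no explicit error handling is needed here.
	 */
	struct dentry *wwan_dir = wwan_get_debugfs_dir(foo->dev);

	foo->debugfs_dir = debugfs_create_dir("foo", wwan_dir);
	debugfs_create_u32("rx_drops", 0444, foo->debugfs_dir,
			   &foo->rx_drops);
}

static void foo_debugfs_deinit(struct foo_dev *foo)
{
	/* Removes the whole subtree, including the files created above. */
	debugfs_remove_recursive(foo->debugfs_dir);
}

In the IOSM driver this pattern materializes as the trace_ctrl and trace entries added later in this series; assuming the default naming, tracing would be enabled by writing 1 to /sys/kernel/debug/wwan/wwanX/iosm/trace_ctrl and the captured data read back from the relay file(s) next to it.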
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile index fe51feedac21..e722650bebea 100644 --- a/drivers/net/wwan/Makefile +++ b/drivers/net/wwan/Makefile @@ -10,5 +10,6 @@ obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o +obj-$(CONFIG_QCOM_BAM_DMUX) += qcom_bam_dmux.o obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o obj-$(CONFIG_IOSM) += iosm/ diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile index b838034bb120..fa8d6afd18e1 100644 --- a/drivers/net/wwan/iosm/Makefile +++ b/drivers/net/wwan/iosm/Makefile @@ -23,4 +23,8 @@ iosm-y = \ iosm_ipc_flash.o \ iosm_ipc_coredump.o +iosm-$(CONFIG_WWAN_DEBUGFS) += \ + iosm_ipc_debugfs.o \ + iosm_ipc_trace.o + obj-$(CONFIG_IOSM) := iosm.o diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.c b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c new file mode 100644 index 000000000000..f2f57751a7d2 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#include <linux/debugfs.h> +#include <linux/wwan.h> + +#include "iosm_ipc_imem.h" +#include "iosm_ipc_trace.h" +#include "iosm_ipc_debugfs.h" + +void ipc_debugfs_init(struct iosm_imem *ipc_imem) +{ + struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev); + + ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, + debugfs_pdev); + + ipc_imem->trace = ipc_trace_init(ipc_imem); + if (!ipc_imem->trace) + dev_warn(ipc_imem->dev, "trace channel init failed"); +} + +void ipc_debugfs_deinit(struct iosm_imem *ipc_imem) +{ + ipc_trace_deinit(ipc_imem->trace); + debugfs_remove_recursive(ipc_imem->debugfs_dir); +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_debugfs.h b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h new file mode 100644 index 000000000000..8a84bfa2c14a --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_debugfs.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#ifndef IOSM_IPC_DEBUGFS_H +#define IOSM_IPC_DEBUGFS_H + +#ifdef CONFIG_WWAN_DEBUGFS +void ipc_debugfs_init(struct iosm_imem *ipc_imem); +void ipc_debugfs_deinit(struct iosm_imem *ipc_imem); +#else +static inline void ipc_debugfs_init(struct iosm_imem *ipc_imem) {} +static inline void ipc_debugfs_deinit(struct iosm_imem *ipc_imem) {} +#endif + +#endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index cff3b43ca4d7..f9e8e0ee4de3 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -10,6 +10,8 @@ #include "iosm_ipc_flash.h" #include "iosm_ipc_imem.h" #include "iosm_ipc_port.h" +#include "iosm_ipc_trace.h" +#include "iosm_ipc_debugfs.h" /* Check the wwan ips if it is valid with Channel as input. */ static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl) @@ -132,7 +134,6 @@ static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem, * for channel alloc function. 
*/ cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID; - cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES; return 0; } @@ -181,9 +182,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer) bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem) { struct ipc_mem_channel *channel; + bool hpda_ctrl_pending = false; struct sk_buff_head *ul_list; bool hpda_pending = false; - bool forced_hpdu = false; struct ipc_pipe *pipe; int i; @@ -200,15 +201,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem) ul_list = &channel->ul_list; /* Fill the transfer descriptor with the uplink buffer info. */ - hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, + if (!ipc_imem_check_wwan_ips(channel)) { + hpda_ctrl_pending |= + ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, pipe, ul_list); - - /* forced HP update needed for non data channels */ - if (hpda_pending && !ipc_imem_check_wwan_ips(channel)) - forced_hpdu = true; + } else { + hpda_pending |= + ipc_protocol_ul_td_send(ipc_imem->ipc_protocol, + pipe, ul_list); + } } - if (forced_hpdu) { + /* forced HP update needed for non data channels */ + if (hpda_ctrl_pending) { hpda_pending = false; ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, IPC_HP_UL_WRITE_TD); @@ -265,9 +270,14 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem, switch (pipe->channel->ctype) { case IPC_CTYPE_CTRL: port_id = pipe->channel->channel_id; + ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len, + IPC_CB(skb)->mapping, + IPC_CB(skb)->direction); if (port_id == IPC_MEM_CTRL_CHL_ID_7) ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb); + else if (ipc_is_trace_channel(ipc_imem, port_id)) + ipc_trace_port_rx(ipc_imem, skb); else wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb); @@ -527,6 +537,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) return; } + if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) + ipc_devlink_deinit(ipc_imem->ipc_devlink); + if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg)) ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem); @@ -548,6 +561,8 @@ static void ipc_imem_run_state_worker(struct work_struct *instance) ctrl_chl_idx++; } + ipc_debugfs_init(ipc_imem); + ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0, false); @@ -1163,11 +1178,12 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem) if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) { ipc_mux_deinit(ipc_imem->mux); + ipc_debugfs_deinit(ipc_imem); ipc_wwan_deinit(ipc_imem->wwan); ipc_port_deinit(ipc_imem->ipc_port); } - if (ipc_imem->ipc_devlink) + if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag)) ipc_devlink_deinit(ipc_imem->ipc_devlink); ipc_imem_device_ipc_uninit(ipc_imem); @@ -1263,7 +1279,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, ipc_imem->pci_device_id = device_id; - ipc_imem->ev_cdev_write_pending = false; ipc_imem->cp_version = 0; ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP; @@ -1331,6 +1346,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, if (ipc_flash_link_establish(ipc_imem)) goto devlink_channel_fail; + + set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag); } return ipc_imem; devlink_channel_fail: diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index 6be6708b4eec..98554e9beb01 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -101,6 +101,7 @@ struct ipc_chnl_cfg; #define IOSM_CHIP_INFO_SIZE_MAX 100 
#define FULLY_FUNCTIONAL 0 +#define IOSM_DEVLINK_INIT 1 /* List of the supported UL/DL pipes. */ enum ipc_mem_pipes { @@ -304,6 +305,7 @@ enum ipc_phase { * @sio: IPC SIO data structure pointer * @ipc_port: IPC PORT data structure pointer * @pcie: IPC PCIe + * @trace: IPC trace data structure pointer * @dev: Pointer to device structure * @ipc_requested_state: Expected IPC state on CP. * @channels: Channel list with UL/DL pipe pairs. @@ -335,11 +337,10 @@ enum ipc_phase { * process the irq actions. * @flag: Flag to monitor the state of driver * @td_update_timer_suspended: if true then td update timer suspend - * @ev_cdev_write_pending: 0 means inform the IPC tasklet to pass - * the accumulated uplink buffers to CP. * @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass * @reset_det_n: Reset detect flag * @pcie_wake_n: Pcie wake flag + * @debugfs_dir: Debug FS directory for driver-specific entries */ struct iosm_imem { struct iosm_mmio *mmio; @@ -349,6 +350,9 @@ struct iosm_imem { struct iosm_mux *mux; struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS]; struct iosm_pcie *pcie; +#ifdef CONFIG_WWAN_DEBUGFS + struct iosm_trace *trace; +#endif struct device *dev; enum ipc_mem_device_ipc_state ipc_requested_state; struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS]; @@ -374,10 +378,12 @@ struct iosm_imem { u8 ev_irq_pending[IPC_IRQ_VECTORS]; unsigned long flag; u8 td_update_timer_suspended:1, - ev_cdev_write_pending:1, ev_mux_net_transmit_pending:1, reset_det_n:1, pcie_wake_n:1; +#ifdef CONFIG_WWAN_DEBUGFS + struct dentry *debugfs_dir; +#endif }; /** diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 825e8e5ffb2a..57304a5adf68 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -41,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id, static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg, void *msg, size_t size) { - ipc_imem->ev_cdev_write_pending = false; ipc_imem_ul_send(ipc_imem); return 0; @@ -50,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg, /* Through tasklet to do sio write. */ static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem) { - if (ipc_imem->ev_cdev_write_pending) - return -1; - - ipc_imem->ev_cdev_write_pending = true; - return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0, NULL, 0, false); } @@ -182,11 +176,14 @@ channel_unavailable: return false; } -/* Release a sio link to CP. */ -void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev) +/** + * ipc_imem_sys_port_close - Release a sio link to CP. + * @ipc_imem: Imem instance. + * @channel: Channel instance. 
+ */ +void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem, + struct ipc_mem_channel *channel) { - struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem; - struct ipc_mem_channel *channel = ipc_cdev->channel; enum ipc_phase curr_phase; int status = 0; u32 tail = 0; @@ -450,6 +447,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink) /* Release the pipe resources */ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe); ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe); + ipc_imem->nr_of_channels--; } void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink, @@ -643,6 +641,6 @@ int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data, memcpy(data, skb->data, skb->len); devlink_read_fail: - ipc_pcie_kfree_skb(devlink->pcie, skb); + dev_kfree_skb(skb); return rc; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index f0c88ac5643c..f8afb217d9e2 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -43,12 +43,8 @@ */ struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem, int chl_id, int hp_id); - -/** - * ipc_imem_sys_cdev_close - Release a sio link to CP. - * @ipc_cdev: iosm sio instance. - */ -void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev); +void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem, + struct ipc_mem_channel *channel); /** * ipc_imem_sys_cdev_write - Route the uplink buffer to CP. @@ -145,4 +141,5 @@ int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data, */ int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink, unsigned char *buf, int count); + #endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c index 09f94c123531..f09e5e77a2a5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c @@ -192,7 +192,7 @@ void ipc_mmio_config(struct iosm_mmio *ipc_mmio) iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end); iowrite64(ipc_mmio->context_info_addr, - ipc_mmio->base + ipc_mmio->offset.context_info); + ipc_mmio->base + ipc_mmio->offset.context_info); } void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr, diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c index c1c77ce699da..8e66ffe92055 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c @@ -97,7 +97,7 @@ static bool ipc_mux_session_open(struct iosm_mux *ipc_mux, /* Search for a free session interface id. */ if_id = le32_to_cpu(session_open->if_id); - if (if_id < 0 || if_id >= ipc_mux->nr_sessions) { + if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "invalid interface id=%d", if_id); return false; } @@ -129,6 +129,7 @@ static bool ipc_mux_session_open(struct iosm_mux *ipc_mux, /* Save and return the assigned if id. */ session_open->if_id = cpu_to_le32(if_id); + ipc_mux->nr_sessions++; return true; } @@ -151,7 +152,7 @@ static void ipc_mux_session_close(struct iosm_mux *ipc_mux, /* Copy the session interface id. 
*/ if_id = le32_to_cpu(msg->if_id); - if (if_id < 0 || if_id >= ipc_mux->nr_sessions) { + if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "invalid session id %d", if_id); return; } @@ -170,6 +171,7 @@ static void ipc_mux_session_close(struct iosm_mux *ipc_mux, ipc_mux->session[if_id].flow_ctl_mask = 0; ipc_mux_session_reset(ipc_mux, if_id); + ipc_mux->nr_sessions--; } static void ipc_mux_channel_close(struct iosm_mux *ipc_mux, @@ -178,7 +180,7 @@ static void ipc_mux_channel_close(struct iosm_mux *ipc_mux, int i; /* Free pending session UL packet. */ - for (i = 0; i < ipc_mux->nr_sessions; i++) + for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) if (ipc_mux->session[i].wwan) ipc_mux_session_reset(ipc_mux, i); @@ -244,6 +246,11 @@ static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg) /* Release an IP session. */ ipc_mux->event = MUX_E_MUX_SESSION_CLOSE; ipc_mux_session_close(ipc_mux, &msg->session_close); + if (!ipc_mux->nr_sessions) { + ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE; + ipc_mux_channel_close(ipc_mux, + &msg->channel_close); + } ret = ipc_mux->channel_id; goto out; @@ -281,7 +288,6 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg, ipc_mux->protocol = mux_cfg->protocol; ipc_mux->ul_flow = mux_cfg->ul_flow; - ipc_mux->nr_sessions = mux_cfg->nr_sessions; ipc_mux->instance_id = mux_cfg->instance_id; ipc_mux->wwan_q_offset = 0; @@ -340,7 +346,7 @@ static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux) struct mux_session *session; int idx; - for (idx = 0; idx < ipc_mux->nr_sessions; idx++) { + for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) { session = &ipc_mux->session[idx]; if (!session->wwan) @@ -365,7 +371,7 @@ static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux) struct mux_session *session; int idx; - for (idx = 0; idx < ipc_mux->nr_sessions; idx++) { + for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) { session = &ipc_mux->session[idx]; if (!session->wwan) @@ -387,7 +393,7 @@ void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux) int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux) { - return ipc_mux ? ipc_mux->nr_sessions : -EFAULT; + return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT; } enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux) @@ -435,9 +441,11 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux) return; ipc_mux_stop_netif_for_all_sessions(ipc_mux); - channel_close = &mux_msg.channel_close; - channel_close->event = MUX_E_MUX_CHANNEL_CLOSE; - ipc_mux_schedule(ipc_mux, &mux_msg); + if (ipc_mux->state == MUX_S_ACTIVE) { + channel_close = &mux_msg.channel_close; + channel_close->event = MUX_E_MUX_CHANNEL_CLOSE; + ipc_mux_schedule(ipc_mux, &mux_msg); + } /* Empty the ADB free list. 
*/ free_list = &ipc_mux->ul_adb.free_list; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h index ddd2cd0bd911..88debaa1ed31 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h @@ -278,7 +278,6 @@ struct iosm_mux { struct ipc_mux_config { enum ipc_mux_protocol protocol; enum ipc_mux_ul_flow ul_flow; - int nr_sessions; int instance_id; }; diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index bdb2d32cdb6d..40fb54a0513e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -175,7 +175,7 @@ static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux, switch (le32_to_cpu(cmdh->command_type)) { case MUX_LITE_CMD_FLOW_CTL: - if (cmdh->if_id >= ipc_mux->nr_sessions) { + if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "if_id [%d] not valid", cmdh->if_id); return -EINVAL; /* No session interface id. */ @@ -307,13 +307,13 @@ static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux, } if_id = fct->if_id; - if (if_id >= ipc_mux->nr_sessions) { + if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "not supported if_id: %d", if_id); return; } /* Is the session active ? */ - if_id = array_index_nospec(if_id, ipc_mux->nr_sessions); + if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES); wwan = ipc_mux->session[if_id].wwan; if (!wwan) { dev_err(ipc_mux->dev, "session Net ID is NULL"); @@ -355,13 +355,13 @@ static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux, } if_id = adgh->if_id; - if (if_id >= ipc_mux->nr_sessions) { + if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) { dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id); return; } /* Is the session active ? 
*/ - if_id = array_index_nospec(if_id, ipc_mux->nr_sessions); + if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES); wwan = ipc_mux->session[if_id].wwan; if (!wwan) { dev_err(ipc_mux->dev, "session Net ID is NULL"); @@ -538,7 +538,7 @@ static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux) struct mux_session *session; int idx; - for (idx = 0; idx < ipc_mux->nr_sessions; idx++) { + for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) { session = &ipc_mux->session[idx]; if (!session->wwan) @@ -563,7 +563,7 @@ static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux) qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) + MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl); - for (i = 0; i < ipc_mux->nr_sessions; i++) { + for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) { session = &ipc_mux->session[i]; if (!session->wwan || session->flow_ctl_mask) @@ -777,13 +777,13 @@ bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux) ipc_mux->adb_prep_ongoing = true; - for (i = 0; i < ipc_mux->nr_sessions; i++) { + for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) { session_id = ipc_mux->rr_next_session; session = &ipc_mux->session[session_id]; /* Go to next handle rr_next_session overflow */ ipc_mux->rr_next_session++; - if (ipc_mux->rr_next_session >= ipc_mux->nr_sessions) + if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES) ipc_mux->rr_next_session = 0; if (!session->wwan || session->flow_ctl_mask || diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index 2fe88b8be348..d73894e2a84e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -363,67 +363,22 @@ static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie) int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie) { - struct pci_dev *pdev; - int ret; - - pdev = ipc_pcie->pci; - - /* Execute D3 one time. */ - if (pdev->current_state != PCI_D0) { - dev_dbg(ipc_pcie->dev, "done for PM=%d", pdev->current_state); - return 0; - } - /* The HAL shall ask the shared memory layer whether D3 is allowed. */ ipc_imem_pm_suspend(ipc_pcie->imem); - /* Save the PCI configuration space of a device before suspending. */ - ret = pci_save_state(pdev); - - if (ret) { - dev_err(ipc_pcie->dev, "pci_save_state error=%d", ret); - return ret; - } - - /* Set the power state of a PCI device. - * Transition a device to a new power state, using the device's PCI PM - * registers. - */ - ret = pci_set_power_state(pdev, PCI_D3cold); - - if (ret) { - dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret); - return ret; - } - dev_dbg(ipc_pcie->dev, "SUSPEND done"); - return ret; + return 0; } int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie) { - int ret; - - /* Set the power state of a PCI device. - * Transition a device to a new power state, using the device's PCI PM - * registers. - */ - ret = pci_set_power_state(ipc_pcie->pci, PCI_D0); - - if (ret) { - dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret); - return ret; - } - - pci_restore_state(ipc_pcie->pci); - /* The HAL shall inform the shared memory layer that the device is * active. 
*/ ipc_imem_pm_resume(ipc_pcie->imem); dev_dbg(ipc_pcie->dev, "RESUME done"); - return ret; + return 0; } static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev) diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c index beb944847398..b6d81c627277 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_port.c +++ b/drivers/net/wwan/iosm/iosm_ipc_port.c @@ -27,7 +27,7 @@ static void ipc_port_ctrl_stop(struct wwan_port *port) { struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port); - ipc_imem_sys_cdev_close(ipc_port); + ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel); } /* transfer control data to modem */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c new file mode 100644 index 000000000000..eeecfa3d10c5 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#include <linux/wwan.h> +#include "iosm_ipc_trace.h" + +/* sub buffer size and number of sub buffers */ +#define IOSM_TRC_SUB_BUFF_SIZE 131072 +#define IOSM_TRC_N_SUB_BUFF 32 + +#define IOSM_TRC_FILE_PERM 0600 + +#define IOSM_TRC_DEBUGFS_TRACE "trace" +#define IOSM_TRC_DEBUGFS_TRACE_CTRL "trace_ctrl" + +/** + * ipc_trace_port_rx - Receive trace packet from cp and write to relay buffer + * @ipc_imem: Pointer to iosm_imem structure + * @skb: Pointer to struct sk_buff + */ +void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb) +{ + struct iosm_trace *ipc_trace = ipc_imem->trace; + + if (ipc_trace->ipc_rchan) + relay_write(ipc_trace->ipc_rchan, skb->data, skb->len); + + dev_kfree_skb(skb); +} + +/* Creates relay file in debugfs. */ +static struct dentry * +ipc_trace_create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + *is_global = 1; + return debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); +} + +/* Removes relay file from debugfs.
*/ +static int ipc_trace_remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + return 0; +} + +static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf, + void *prev_subbuf, + size_t prev_padding) +{ + if (relay_buf_full(buf)) { + pr_err_ratelimited("Relay_buf full dropping traces"); + return 0; + } + + return 1; +} + +/* Relay interface callbacks */ +static struct rchan_callbacks relay_callbacks = { + .subbuf_start = ipc_trace_subbuf_start_handler, + .create_buf_file = ipc_trace_create_buf_file_handler, + .remove_buf_file = ipc_trace_remove_buf_file_handler, +}; + +/* Copy the trace control mode to user buffer */ +static ssize_t ipc_trace_ctrl_file_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct iosm_trace *ipc_trace = filp->private_data; + char buf[16]; + int len; + + mutex_lock(&ipc_trace->trc_mutex); + len = snprintf(buf, sizeof(buf), "%d\n", ipc_trace->mode); + mutex_unlock(&ipc_trace->trc_mutex); + + return simple_read_from_buffer(buffer, count, ppos, buf, len); +} + +/* Open and close the trace channel depending on user input */ +static ssize_t ipc_trace_ctrl_file_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct iosm_trace *ipc_trace = filp->private_data; + unsigned long val; + int ret; + + ret = kstrtoul_from_user(buffer, count, 10, &val); + if (ret) + return ret; + + mutex_lock(&ipc_trace->trc_mutex); + if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) { + ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem, + ipc_trace->chl_id, + IPC_HP_CDEV_OPEN); + if (!ipc_trace->channel) { + ret = -EIO; + goto unlock; + } + ipc_trace->mode = TRACE_ENABLE; + } else if (val == TRACE_DISABLE && ipc_trace->mode != TRACE_DISABLE) { + ipc_trace->mode = TRACE_DISABLE; + /* close trace channel */ + ipc_imem_sys_port_close(ipc_trace->ipc_imem, + ipc_trace->channel); + relay_flush(ipc_trace->ipc_rchan); + } + ret = count; +unlock: + mutex_unlock(&ipc_trace->trc_mutex); + return ret; +} + +static const struct file_operations ipc_trace_fops = { + .open = simple_open, + .write = ipc_trace_ctrl_file_write, + .read = ipc_trace_ctrl_file_read, +}; + +/** + * ipc_trace_init - Create trace interface & debugfs entries + * @ipc_imem: Pointer to iosm_imem structure + * + * Returns: Pointer to trace instance on success else NULL + */ +struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem) +{ + struct ipc_chnl_cfg chnl_cfg = { 0 }; + struct iosm_trace *ipc_trace; + + ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3); + ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg, + IRQ_MOD_OFF); + + ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL); + if (!ipc_trace) + return NULL; + + ipc_trace->mode = TRACE_DISABLE; + ipc_trace->dev = ipc_imem->dev; + ipc_trace->ipc_imem = ipc_imem; + ipc_trace->chl_id = IPC_MEM_CTRL_CHL_ID_3; + + mutex_init(&ipc_trace->trc_mutex); + + ipc_trace->ctrl_file = debugfs_create_file(IOSM_TRC_DEBUGFS_TRACE_CTRL, + IOSM_TRC_FILE_PERM, + ipc_imem->debugfs_dir, + ipc_trace, &ipc_trace_fops); + + ipc_trace->ipc_rchan = relay_open(IOSM_TRC_DEBUGFS_TRACE, + ipc_imem->debugfs_dir, + IOSM_TRC_SUB_BUFF_SIZE, + IOSM_TRC_N_SUB_BUFF, + &relay_callbacks, NULL); + + return ipc_trace; +} + +/** + * ipc_trace_deinit - Closing relayfs, removing debugfs entries + * @ipc_trace: Pointer to the iosm_trace data struct + */ +void ipc_trace_deinit(struct iosm_trace *ipc_trace) +{ + if (!ipc_trace) + return; + + debugfs_remove(ipc_trace->ctrl_file); + 
relay_close(ipc_trace->ipc_rchan); + mutex_destroy(&ipc_trace->trc_mutex); + kfree(ipc_trace); +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.h b/drivers/net/wwan/iosm/iosm_ipc_trace.h new file mode 100644 index 000000000000..5ebe7790585c --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_trace.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#ifndef IOSM_IPC_TRACE_H +#define IOSM_IPC_TRACE_H + +#include <linux/debugfs.h> +#include <linux/relay.h> + +#include "iosm_ipc_chnl_cfg.h" +#include "iosm_ipc_imem_ops.h" + +/** + * enum trace_ctrl_mode - State of trace channel + * @TRACE_DISABLE: mode to disable trace + * @TRACE_ENABLE: mode to enable trace + */ +enum trace_ctrl_mode { + TRACE_DISABLE = 0, + TRACE_ENABLE, +}; + +/** + * struct iosm_trace - Struct for trace interface + * @ipc_rchan: Pointer to relay channel + * @ctrl_file: Pointer to trace control file + * @ipc_imem: Imem instance + * @dev: Pointer to device struct + * @channel: Channel instance + * @chl_id: Channel Identifier + * @trc_mutex: Mutex used for read and write mode + * @mode: Mode to enable or disable trace + */ + +struct iosm_trace { + struct rchan *ipc_rchan; + struct dentry *ctrl_file; + struct iosm_imem *ipc_imem; + struct device *dev; + struct ipc_mem_channel *channel; + enum ipc_channel_id chl_id; + struct mutex trc_mutex; /* Mutex used for read and write mode */ + enum trace_ctrl_mode mode; +}; + +#ifdef CONFIG_WWAN_DEBUGFS + +static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id) +{ + return ipc_mem->trace && ipc_mem->trace->chl_id == chl_id; +} + +struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem); +void ipc_trace_deinit(struct iosm_trace *ipc_trace); +void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb); + +#else + +static inline bool ipc_is_trace_channel(struct iosm_imem *ipc_mem, u16 chl_id) +{ + return false; +} + +static inline void ipc_trace_port_rx(struct iosm_imem *ipc_imem, + struct sk_buff *skb) +{ + dev_kfree_skb(skb); +} + +#endif + +#endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index b571d9cedba4..27151148c782 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -8,6 +8,7 @@ #include <linux/if_link.h> #include <linux/rtnetlink.h> #include <linux/wwan.h> +#include <net/pkt_sched.h> #include "iosm_ipc_chnl_cfg.h" #include "iosm_ipc_imem_ops.h" @@ -159,7 +160,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev) { iosm_dev->header_ops = NULL; iosm_dev->hard_header_len = 0; - iosm_dev->priv_flags |= IFF_NO_QUEUE; + iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; iosm_dev->type = ARPHRD_NONE; iosm_dev->mtu = ETH_DATA_LEN; diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.h b/drivers/net/wwan/iosm/iosm_ipc_wwan.h index 4925f22dff0a..a23e926398ff 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.h +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.h @@ -42,14 +42,4 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg, * */ void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int id, bool on); - -/** - * ipc_wwan_is_tx_stopped - Checks if Tx stopped for a Interface id.
- * @ipc_wwan: Pointer to wwan instance - * @id: Ipc mux channel session id - * - * Return: true if stopped, false otherwise - */ -bool ipc_wwan_is_tx_stopped(struct iosm_wwan *ipc_wwan, int id); - #endif diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c new file mode 100644 index 000000000000..5dfa2eba6014 --- /dev/null +++ b/drivers/net/wwan/qcom_bam_dmux.c @@ -0,0 +1,907 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm BAM-DMUX WWAN network driver + * Copyright (c) 2020, Stephan Gerhold <stephan@gerhold.net> + */ + +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/completion.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/if_arp.h> +#include <linux/interrupt.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/soc/qcom/smem_state.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/workqueue.h> +#include <net/pkt_sched.h> + +#define BAM_DMUX_BUFFER_SIZE SZ_2K +#define BAM_DMUX_HDR_SIZE sizeof(struct bam_dmux_hdr) +#define BAM_DMUX_MAX_DATA_SIZE (BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE) +#define BAM_DMUX_NUM_SKB 32 + +#define BAM_DMUX_HDR_MAGIC 0x33fc + +#define BAM_DMUX_AUTOSUSPEND_DELAY 1000 +#define BAM_DMUX_REMOTE_TIMEOUT msecs_to_jiffies(2000) + +enum { + BAM_DMUX_CMD_DATA, + BAM_DMUX_CMD_OPEN, + BAM_DMUX_CMD_CLOSE, +}; + +enum { + BAM_DMUX_CH_DATA_0, + BAM_DMUX_CH_DATA_1, + BAM_DMUX_CH_DATA_2, + BAM_DMUX_CH_DATA_3, + BAM_DMUX_CH_DATA_4, + BAM_DMUX_CH_DATA_5, + BAM_DMUX_CH_DATA_6, + BAM_DMUX_CH_DATA_7, + BAM_DMUX_NUM_CH +}; + +struct bam_dmux_hdr { + u16 magic; + u8 signal; + u8 cmd; + u8 pad; + u8 ch; + u16 len; +}; + +struct bam_dmux_skb_dma { + struct bam_dmux *dmux; + struct sk_buff *skb; + dma_addr_t addr; +}; + +struct bam_dmux { + struct device *dev; + + int pc_irq; + bool pc_state, pc_ack_state; + struct qcom_smem_state *pc, *pc_ack; + u32 pc_mask, pc_ack_mask; + wait_queue_head_t pc_wait; + struct completion pc_ack_completion; + + struct dma_chan *rx, *tx; + struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB]; + struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB]; + spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */ + unsigned int tx_next_skb; + atomic_long_t tx_deferred_skb; + struct work_struct tx_wakeup_work; + + DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH); + struct work_struct register_netdev_work; + struct net_device *netdevs[BAM_DMUX_NUM_CH]; +}; + +struct bam_dmux_netdev { + struct bam_dmux *dmux; + u8 ch; +}; + +static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable) +{ + reinit_completion(&dmux->pc_ack_completion); + qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask, + enable ? dmux->pc_mask : 0); +} + +static void bam_dmux_pc_ack(struct bam_dmux *dmux) +{ + qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask, + dmux->pc_ack_state ? 
0 : dmux->pc_ack_mask); + dmux->pc_ack_state = !dmux->pc_ack_state; +} + +static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma, + enum dma_data_direction dir) +{ + struct device *dev = skb_dma->dmux->dev; + + skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir); + if (dma_mapping_error(dev, skb_dma->addr)) { + dev_err(dev, "Failed to DMA map buffer\n"); + skb_dma->addr = 0; + return false; + } + + return true; +} + +static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma, + enum dma_data_direction dir) +{ + dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir); + skb_dma->addr = 0; +} + +static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux) +{ + int i; + + dev_dbg(dmux->dev, "wake queues\n"); + + for (i = 0; i < BAM_DMUX_NUM_CH; ++i) { + struct net_device *netdev = dmux->netdevs[i]; + + if (netdev && netif_running(netdev)) + netif_wake_queue(netdev); + } +} + +static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux) +{ + int i; + + dev_dbg(dmux->dev, "stop queues\n"); + + for (i = 0; i < BAM_DMUX_NUM_CH; ++i) { + struct net_device *netdev = dmux->netdevs[i]; + + if (netdev) + netif_stop_queue(netdev); + } +} + +static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma) +{ + struct bam_dmux *dmux = skb_dma->dmux; + unsigned long flags; + + pm_runtime_mark_last_busy(dmux->dev); + pm_runtime_put_autosuspend(dmux->dev); + + if (skb_dma->addr) + bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE); + + spin_lock_irqsave(&dmux->tx_lock, flags); + skb_dma->skb = NULL; + if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB]) + bam_dmux_tx_wake_queues(dmux); + spin_unlock_irqrestore(&dmux->tx_lock, flags); +} + +static void bam_dmux_tx_callback(void *data) +{ + struct bam_dmux_skb_dma *skb_dma = data; + struct sk_buff *skb = skb_dma->skb; + + bam_dmux_tx_done(skb_dma); + dev_consume_skb_any(skb); +} + +static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma) +{ + struct bam_dmux *dmux = skb_dma->dmux; + struct dma_async_tx_descriptor *desc; + + desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr, + skb_dma->skb->len, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n"); + return false; + } + + desc->callback = bam_dmux_tx_callback; + desc->callback_param = skb_dma; + desc->cookie = dmaengine_submit(desc); + return true; +} + +static struct bam_dmux_skb_dma * +bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb) +{ + struct bam_dmux_skb_dma *skb_dma; + unsigned long flags; + + spin_lock_irqsave(&dmux->tx_lock, flags); + + skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB]; + if (skb_dma->skb) { + bam_dmux_tx_stop_queues(dmux); + spin_unlock_irqrestore(&dmux->tx_lock, flags); + return NULL; + } + skb_dma->skb = skb; + + dmux->tx_next_skb++; + if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb) + bam_dmux_tx_stop_queues(dmux); + + spin_unlock_irqrestore(&dmux->tx_lock, flags); + return skb_dma; +} + +static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd) +{ + struct bam_dmux *dmux = bndev->dmux; + struct bam_dmux_skb_dma *skb_dma; + struct bam_dmux_hdr *hdr; + struct sk_buff *skb; + int ret; + + skb = alloc_skb(sizeof(*hdr), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put_zero(skb, sizeof(*hdr)); + hdr->magic = BAM_DMUX_HDR_MAGIC; + hdr->cmd = cmd; + hdr->ch = bndev->ch; + + skb_dma = bam_dmux_tx_queue(dmux, skb); + if (!skb_dma) { + ret = -EAGAIN; + goto free_skb; + } + + 
ret = pm_runtime_get_sync(dmux->dev); + if (ret < 0) + goto tx_fail; + + if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) { + ret = -ENOMEM; + goto tx_fail; + } + + if (!bam_dmux_skb_dma_submit_tx(skb_dma)) { + ret = -EIO; + goto tx_fail; + } + + dma_async_issue_pending(dmux->tx); + return 0; + +tx_fail: + bam_dmux_tx_done(skb_dma); +free_skb: + dev_kfree_skb(skb); + return ret; +} + +static int bam_dmux_netdev_open(struct net_device *netdev) +{ + struct bam_dmux_netdev *bndev = netdev_priv(netdev); + int ret; + + ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN); + if (ret) + return ret; + + netif_start_queue(netdev); + return 0; +} + +static int bam_dmux_netdev_stop(struct net_device *netdev) +{ + struct bam_dmux_netdev *bndev = netdev_priv(netdev); + + netif_stop_queue(netdev); + bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE); + return 0; +} + +static unsigned int needed_room(unsigned int avail, unsigned int needed) +{ + if (avail >= needed) + return 0; + return needed - avail; +} + +static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev, + struct sk_buff *skb) +{ + unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE); + unsigned int pad = sizeof(u32) - skb->len % sizeof(u32); + unsigned int tail = needed_room(skb_tailroom(skb), pad); + struct bam_dmux_hdr *hdr; + int ret; + + if (head || tail || skb_cloned(skb)) { + ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC); + if (ret) + return ret; + } + + hdr = skb_push(skb, sizeof(*hdr)); + hdr->magic = BAM_DMUX_HDR_MAGIC; + hdr->signal = 0; + hdr->cmd = BAM_DMUX_CMD_DATA; + hdr->pad = pad; + hdr->ch = bndev->ch; + hdr->len = skb->len - sizeof(*hdr); + if (pad) + skb_put_zero(skb, pad); + + return 0; +} + +static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct bam_dmux_netdev *bndev = netdev_priv(netdev); + struct bam_dmux *dmux = bndev->dmux; + struct bam_dmux_skb_dma *skb_dma; + int active, ret; + + skb_dma = bam_dmux_tx_queue(dmux, skb); + if (!skb_dma) + return NETDEV_TX_BUSY; + + active = pm_runtime_get(dmux->dev); + if (active < 0 && active != -EINPROGRESS) + goto drop; + + ret = bam_dmux_tx_prepare_skb(bndev, skb); + if (ret) + goto drop; + + if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) + goto drop; + + if (active <= 0) { + /* Cannot sleep here so mark skb for wakeup handler and return */ + if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs), + &dmux->tx_deferred_skb)) + queue_pm_work(&dmux->tx_wakeup_work); + return NETDEV_TX_OK; + } + + if (!bam_dmux_skb_dma_submit_tx(skb_dma)) + goto drop; + + dma_async_issue_pending(dmux->tx); + return NETDEV_TX_OK; + +drop: + bam_dmux_tx_done(skb_dma); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static void bam_dmux_tx_wakeup_work(struct work_struct *work) +{ + struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work); + unsigned long pending; + int ret, i; + + ret = pm_runtime_resume_and_get(dmux->dev); + if (ret < 0) { + dev_err(dmux->dev, "Failed to resume: %d\n", ret); + return; + } + + pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0); + if (!pending) + goto out; + + dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending); + for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) { + bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]); + } + dma_async_issue_pending(dmux->tx); + +out: + pm_runtime_mark_last_busy(dmux->dev); + pm_runtime_put_autosuspend(dmux->dev); +} + +static const struct net_device_ops bam_dmux_ops = { + .ndo_open = bam_dmux_netdev_open, + .ndo_stop = 
bam_dmux_netdev_stop, + .ndo_start_xmit = bam_dmux_netdev_start_xmit, +}; + +static const struct device_type wwan_type = { + .name = "wwan", +}; + +static void bam_dmux_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &bam_dmux_ops; + + dev->type = ARPHRD_RAWIP; + SET_NETDEV_DEVTYPE(dev, &wwan_type); + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + + dev->mtu = ETH_DATA_LEN; + dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE; + dev->needed_headroom = sizeof(struct bam_dmux_hdr); + dev->needed_tailroom = sizeof(u32); /* word-aligned */ + dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + + /* This perm addr will be used as interface identifier by IPv6 */ + dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->perm_addr); +} + +static void bam_dmux_register_netdev_work(struct work_struct *work) +{ + struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work); + struct bam_dmux_netdev *bndev; + struct net_device *netdev; + int ch, ret; + + for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) { + if (dmux->netdevs[ch]) + continue; + + netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM, + bam_dmux_netdev_setup); + if (!netdev) + return; + + SET_NETDEV_DEV(netdev, dmux->dev); + netdev->dev_port = ch; + + bndev = netdev_priv(netdev); + bndev->dmux = dmux; + bndev->ch = ch; + + ret = register_netdev(netdev); + if (ret) { + dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n", + ch, ret); + free_netdev(netdev); + return; + } + + dmux->netdevs[ch] = netdev; + } +} + +static void bam_dmux_rx_callback(void *data); + +static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma) +{ + struct bam_dmux *dmux = skb_dma->dmux; + struct dma_async_tx_descriptor *desc; + + desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr, + skb_dma->skb->len, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n"); + return false; + } + + desc->callback = bam_dmux_rx_callback; + desc->callback_param = skb_dma; + desc->cookie = dmaengine_submit(desc); + return true; +} + +static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp) +{ + if (!skb_dma->skb) { + skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp); + if (!skb_dma->skb) + return false; + skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE); + } + + return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) && + bam_dmux_skb_dma_submit_rx(skb_dma); +} + +static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma) +{ + struct bam_dmux *dmux = skb_dma->dmux; + struct sk_buff *skb = skb_dma->skb; + struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data; + struct net_device *netdev = dmux->netdevs[hdr->ch]; + + if (!netdev || !netif_running(netdev)) { + dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch); + return; + } + + if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) { + dev_err(dmux->dev, "Data larger than buffer? 
(%u > %u)\n", + hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE); + return; + } + + skb_dma->skb = NULL; /* Hand over to network stack */ + + skb_pull(skb, sizeof(*hdr)); + skb_trim(skb, hdr->len); + skb->dev = netdev; + + /* Only Raw-IP/QMAP is supported by this driver */ + switch (skb->data[0] & 0xf0) { + case 0x40: + skb->protocol = htons(ETH_P_IP); + break; + case 0x60: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + break; + } + + netif_receive_skb(skb); +} + +static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr) +{ + struct net_device *netdev = dmux->netdevs[hdr->ch]; + + dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch); + + if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) { + dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch); + return; + } + + if (netdev) { + netif_device_attach(netdev); + } else { + /* Cannot sleep here, schedule work to register the netdev */ + schedule_work(&dmux->register_netdev_work); + } +} + +static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr) +{ + struct net_device *netdev = dmux->netdevs[hdr->ch]; + + dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch); + + if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) { + dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch); + return; + } + + if (netdev) + netif_device_detach(netdev); +} + +static void bam_dmux_rx_callback(void *data) +{ + struct bam_dmux_skb_dma *skb_dma = data; + struct bam_dmux *dmux = skb_dma->dmux; + struct sk_buff *skb = skb_dma->skb; + struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data; + + bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE); + + if (hdr->magic != BAM_DMUX_HDR_MAGIC) { + dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic); + goto out; + } + + if (hdr->ch >= BAM_DMUX_NUM_CH) { + dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch); + goto out; + } + + switch (hdr->cmd) { + case BAM_DMUX_CMD_DATA: + bam_dmux_cmd_data(skb_dma); + break; + case BAM_DMUX_CMD_OPEN: + bam_dmux_cmd_open(dmux, hdr); + break; + case BAM_DMUX_CMD_CLOSE: + bam_dmux_cmd_close(dmux, hdr); + break; + default: + dev_err(dmux->dev, "Unsupported command %u on channel %u\n", + hdr->cmd, hdr->ch); + break; + } + +out: + if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC)) + dma_async_issue_pending(dmux->rx); +} + +static bool bam_dmux_power_on(struct bam_dmux *dmux) +{ + struct device *dev = dmux->dev; + struct dma_slave_config dma_rx_conf = { + .direction = DMA_DEV_TO_MEM, + .src_maxburst = BAM_DMUX_BUFFER_SIZE, + }; + int i; + + dmux->rx = dma_request_chan(dev, "rx"); + if (IS_ERR(dmux->rx)) { + dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx); + dmux->rx = NULL; + return false; + } + dmaengine_slave_config(dmux->rx, &dma_rx_conf); + + for (i = 0; i < BAM_DMUX_NUM_SKB; i++) { + if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL)) + return false; + } + dma_async_issue_pending(dmux->rx); + + return true; +} + +static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[], + enum dma_data_direction dir) +{ + int i; + + for (i = 0; i < BAM_DMUX_NUM_SKB; i++) { + struct bam_dmux_skb_dma *skb_dma = &skbs[i]; + + if (skb_dma->addr) + bam_dmux_skb_dma_unmap(skb_dma, dir); + if (skb_dma->skb) { + dev_kfree_skb(skb_dma->skb); + skb_dma->skb = NULL; + } + } +} + +static void bam_dmux_power_off(struct bam_dmux *dmux) +{ + if (dmux->tx) { + dmaengine_terminate_sync(dmux->tx); + dma_release_channel(dmux->tx); + dmux->tx = NULL; + } + + if (dmux->rx) { + 
dmaengine_terminate_sync(dmux->rx);
+		dma_release_channel(dmux->rx);
+		dmux->rx = NULL;
+	}
+
+	bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
+}
+
+static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
+{
+	struct bam_dmux *dmux = data;
+	bool new_state = !dmux->pc_state;
+
+	dev_dbg(dmux->dev, "pc: %u\n", new_state);
+
+	if (new_state) {
+		if (bam_dmux_power_on(dmux))
+			bam_dmux_pc_ack(dmux);
+		else
+			bam_dmux_power_off(dmux);
+	} else {
+		bam_dmux_power_off(dmux);
+		bam_dmux_pc_ack(dmux);
+	}
+
+	dmux->pc_state = new_state;
+	wake_up_all(&dmux->pc_wait);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
+{
+	struct bam_dmux *dmux = data;
+
+	dev_dbg(dmux->dev, "pc ack\n");
+	complete_all(&dmux->pc_ack_completion);
+
+	return IRQ_HANDLED;
+}
+
+static int bam_dmux_runtime_suspend(struct device *dev)
+{
+	struct bam_dmux *dmux = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "runtime suspend\n");
+	bam_dmux_pc_vote(dmux, false);
+
+	return 0;
+}
+
+static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
+{
+	struct bam_dmux *dmux = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "runtime resume\n");
+
+	/* Wait until previous power down was acked */
+	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
+					 BAM_DMUX_REMOTE_TIMEOUT))
+		return -ETIMEDOUT;
+
+	/* Vote for power state */
+	bam_dmux_pc_vote(dmux, true);
+
+	/* Wait for ack */
+	if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
+					 BAM_DMUX_REMOTE_TIMEOUT)) {
+		bam_dmux_pc_vote(dmux, false);
+		return -ETIMEDOUT;
+	}
+
+	/* Wait until we're up */
+	if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
+				BAM_DMUX_REMOTE_TIMEOUT)) {
+		bam_dmux_pc_vote(dmux, false);
+		return -ETIMEDOUT;
+	}
+
+	/* Ensure that we actually initialized successfully */
+	if (!dmux->rx) {
+		bam_dmux_pc_vote(dmux, false);
+		return -ENXIO;
+	}
+
+	/* Request TX channel if necessary */
+	if (dmux->tx)
+		return 0;
+
+	dmux->tx = dma_request_chan(dev, "tx");
+	if (IS_ERR(dmux->tx)) {
+		dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
+		dmux->tx = NULL;
+		bam_dmux_runtime_suspend(dev);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int bam_dmux_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct bam_dmux *dmux;
+	int ret, pc_ack_irq, i;
+	unsigned int bit;
+
+	dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
+	if (!dmux)
+		return -ENOMEM;
+
+	dmux->dev = dev;
+	platform_set_drvdata(pdev, dmux);
+
+	dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
+	if (dmux->pc_irq < 0)
+		return dmux->pc_irq;
+
+	pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
+	if (pc_ack_irq < 0)
+		return pc_ack_irq;
+
+	dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
+	if (IS_ERR(dmux->pc))
+		return dev_err_probe(dev, PTR_ERR(dmux->pc),
+				     "Failed to get pc state\n");
+	dmux->pc_mask = BIT(bit);
+
+	dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
+	if (IS_ERR(dmux->pc_ack))
+		return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
+				     "Failed to get pc-ack state\n");
+	dmux->pc_ack_mask = BIT(bit);
+
+	init_waitqueue_head(&dmux->pc_wait);
+	init_completion(&dmux->pc_ack_completion);
+	complete_all(&dmux->pc_ack_completion);
+
+	spin_lock_init(&dmux->tx_lock);
+	INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
+	INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);
+
+	for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
+		dmux->rx_skbs[i].dmux = dmux;
+		dmux->tx_skbs[i].dmux = dmux;
+	}
+
+	/* Runtime PM manages our own power vote.
+ * Note that the RX path may be active even if we are runtime suspended, + * since it is controlled by the remote side. + */ + pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + + ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq, + IRQF_ONESHOT, NULL, dmux); + if (ret) + return ret; + + ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq, + IRQF_ONESHOT, NULL, dmux); + if (ret) + return ret; + + ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL, + &dmux->pc_state); + if (ret) + return ret; + + /* Check if remote finished initialization before us */ + if (dmux->pc_state) { + if (bam_dmux_power_on(dmux)) + bam_dmux_pc_ack(dmux); + else + bam_dmux_power_off(dmux); + } + + return 0; +} + +static int bam_dmux_remove(struct platform_device *pdev) +{ + struct bam_dmux *dmux = platform_get_drvdata(pdev); + struct device *dev = dmux->dev; + LIST_HEAD(list); + int i; + + /* Unregister network interfaces */ + cancel_work_sync(&dmux->register_netdev_work); + rtnl_lock(); + for (i = 0; i < BAM_DMUX_NUM_CH; ++i) + if (dmux->netdevs[i]) + unregister_netdevice_queue(dmux->netdevs[i], &list); + unregister_netdevice_many(&list); + rtnl_unlock(); + cancel_work_sync(&dmux->tx_wakeup_work); + + /* Drop our own power vote */ + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + bam_dmux_runtime_suspend(dev); + pm_runtime_set_suspended(dev); + + /* Try to wait for remote side to drop power vote */ + if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT)) + dev_err(dev, "Timed out waiting for remote side to suspend\n"); + + /* Make sure everything is cleaned up before we return */ + disable_irq(dmux->pc_irq); + bam_dmux_power_off(dmux); + bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE); + + return 0; +} + +static const struct dev_pm_ops bam_dmux_pm_ops = { + SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL) +}; + +static const struct of_device_id bam_dmux_of_match[] = { + { .compatible = "qcom,bam-dmux" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bam_dmux_of_match); + +static struct platform_driver bam_dmux_driver = { + .probe = bam_dmux_probe, + .remove = bam_dmux_remove, + .driver = { + .name = "bam-dmux", + .pm = &bam_dmux_pm_ops, + .of_match_table = bam_dmux_of_match, + }, +}; +module_platform_driver(bam_dmux_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver"); +MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>"); diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index d293ab688044..1508dc2a497b 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -3,6 +3,7 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/debugfs.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/idr.h> @@ -25,6 +26,7 @@ static DEFINE_IDA(minors); /* minors for WWAN port chardevs */ static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */ static struct class *wwan_class; static int wwan_major; +static struct dentry *wwan_debugfs_dir; #define to_wwan_dev(d) container_of(d, struct wwan_device, dev) #define to_wwan_port(d) container_of(d, struct wwan_port, dev) @@ -40,6 +42,7 @@ static int wwan_major; * @port_id: Current available port ID to pick. 
* @ops: wwan device ops * @ops_ctxt: context to pass to ops + * @debugfs_dir: WWAN device debugfs dir */ struct wwan_device { unsigned int id; @@ -47,6 +50,9 @@ struct wwan_device { atomic_t port_id; const struct wwan_ops *ops; void *ops_ctxt; +#ifdef CONFIG_WWAN_DEBUGFS + struct dentry *debugfs_dir; +#endif }; /** @@ -142,6 +148,20 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name) return to_wwan_dev(dev); } +#ifdef CONFIG_WWAN_DEBUGFS +struct dentry *wwan_get_debugfs_dir(struct device *parent) +{ + struct wwan_device *wwandev; + + wwandev = wwan_dev_get_by_parent(parent); + if (IS_ERR(wwandev)) + return ERR_CAST(wwandev); + + return wwandev->debugfs_dir; +} +EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir); +#endif + /* This function allocates and registers a new WWAN device OR if a WWAN device * already exist for the given parent, it gets a reference and return it. * This function is not exported (for now), it is called indirectly via @@ -189,6 +209,12 @@ static struct wwan_device *wwan_create_dev(struct device *parent) goto done_unlock; } +#ifdef CONFIG_WWAN_DEBUGFS + wwandev->debugfs_dir = + debugfs_create_dir(kobject_name(&wwandev->dev.kobj), + wwan_debugfs_dir); +#endif + done_unlock: mutex_unlock(&wwan_register_lock); @@ -218,10 +244,14 @@ static void wwan_remove_dev(struct wwan_device *wwandev) else ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child); - if (!ret) + if (!ret) { +#ifdef CONFIG_WWAN_DEBUGFS + debugfs_remove_recursive(wwandev->debugfs_dir); +#endif device_unregister(&wwandev->dev); - else + } else { put_device(&wwandev->dev); + } mutex_unlock(&wwan_register_lock); } @@ -1117,6 +1147,10 @@ static int __init wwan_init(void) goto destroy; } +#ifdef CONFIG_WWAN_DEBUGFS + wwan_debugfs_dir = debugfs_create_dir("wwan", NULL); +#endif + return 0; destroy: @@ -1128,6 +1162,7 @@ unregister: static void __exit wwan_exit(void) { + debugfs_remove_recursive(wwan_debugfs_dir); __unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port"); rtnl_link_unregister(&wwan_rtnl_link_ops); class_destroy(wwan_class); diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 4a16d6e33c09..d9dea4829c86 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ unsigned int rx_queue_max; unsigned int rx_queue_len; unsigned long last_rx_time; + unsigned int rx_slots_needed; bool stalled; struct xenvif_copy_state rx_copy; diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index accc991d153f..dbac4c03d21a 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -33,28 +33,36 @@ #include <xen/xen.h> #include <xen/events.h> -static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) +/* + * Update the needed ring page slots for the first SKB queued. + * Note that any call sequence outside the RX thread calling this function + * needs to wake up the RX thread via a call of xenvif_kick_thread() + * afterwards in order to avoid a race with putting the thread to sleep. 
+ */ +static void xenvif_update_needed_slots(struct xenvif_queue *queue, + const struct sk_buff *skb) { - RING_IDX prod, cons; - struct sk_buff *skb; - int needed; - unsigned long flags; - - spin_lock_irqsave(&queue->rx_queue.lock, flags); + unsigned int needed = 0; - skb = skb_peek(&queue->rx_queue); - if (!skb) { - spin_unlock_irqrestore(&queue->rx_queue.lock, flags); - return false; + if (skb) { + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); + if (skb_is_gso(skb)) + needed++; + if (skb->sw_hash) + needed++; } - needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); - if (skb_is_gso(skb)) - needed++; - if (skb->sw_hash) - needed++; + WRITE_ONCE(queue->rx_slots_needed, needed); +} - spin_unlock_irqrestore(&queue->rx_queue.lock, flags); +static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) +{ + RING_IDX prod, cons; + unsigned int needed; + + needed = READ_ONCE(queue->rx_slots_needed); + if (!needed) + return false; do { prod = queue->rx.sring->req_prod; @@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) spin_lock_irqsave(&queue->rx_queue.lock, flags); - __skb_queue_tail(&queue->rx_queue, skb); - - queue->rx_queue_len += skb->len; - if (queue->rx_queue_len > queue->rx_queue_max) { + if (queue->rx_queue_len >= queue->rx_queue_max) { struct net_device *dev = queue->vif->dev; netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); + kfree_skb(skb); + queue->vif->dev->stats.rx_dropped++; + } else { + if (skb_queue_empty(&queue->rx_queue)) + xenvif_update_needed_slots(queue, skb); + + __skb_queue_tail(&queue->rx_queue, skb); + + queue->rx_queue_len += skb->len; } spin_unlock_irqrestore(&queue->rx_queue.lock, flags); @@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) skb = __skb_dequeue(&queue->rx_queue); if (skb) { + xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue)); + queue->rx_queue_len -= skb->len; if (queue->rx_queue_len < queue->rx_queue_max) { struct netdev_queue *txq; @@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) break; xenvif_rx_dequeue(queue); kfree_skb(skb); + queue->vif->dev->stats.rx_dropped++; } } @@ -487,27 +504,31 @@ void xenvif_rx_action(struct xenvif_queue *queue) xenvif_rx_copy_flush(queue); } -static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) +static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue) { RING_IDX prod, cons; prod = queue->rx.sring->req_prod; cons = queue->rx.req_cons; + return prod - cons; +} + +static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue) +{ + unsigned int needed = READ_ONCE(queue->rx_slots_needed); + return !queue->stalled && - prod - cons < 1 && + xenvif_rx_queue_slots(queue) < needed && time_after(jiffies, queue->last_rx_time + queue->vif->stall_timeout); } static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) { - RING_IDX prod, cons; - - prod = queue->rx.sring->req_prod; - cons = queue->rx.req_cons; + unsigned int needed = READ_ONCE(queue->rx_slots_needed); - return queue->stalled && prod - cons >= 1; + return queue->stalled && xenvif_rx_queue_slots(queue) >= needed; } bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 911f43986a8c..8b18246ad999 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -148,6 +148,9 @@ struct netfront_queue { grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; + unsigned int 
rx_rsp_unconsumed; + spinlock_t rx_cons_lock; + struct page_pool *page_pool; struct xdp_rxq_info xdp_rxq; }; @@ -376,12 +379,13 @@ static int xennet_open(struct net_device *dev) return 0; } -static void xennet_tx_buf_gc(struct netfront_queue *queue) +static bool xennet_tx_buf_gc(struct netfront_queue *queue) { RING_IDX cons, prod; unsigned short id; struct sk_buff *skb; bool more_to_do; + bool work_done = false; const struct device *dev = &queue->info->netdev->dev; BUG_ON(!netif_carrier_ok(queue->info->netdev)); @@ -398,6 +402,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) for (cons = queue->tx.rsp_cons; cons != prod; cons++) { struct xen_netif_tx_response txrsp; + work_done = true; + RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); if (txrsp.status == XEN_NETIF_RSP_NULL) continue; @@ -441,11 +447,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) xennet_maybe_wake_tx(queue); - return; + return work_done; err: queue->info->broken = true; dev_alert(dev, "Disabled for further use\n"); + + return work_done; } struct xennet_gnttab_make_txreq { @@ -834,6 +842,16 @@ static int xennet_close(struct net_device *dev) return 0; } +static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) +{ + unsigned long flags; + + spin_lock_irqsave(&queue->rx_cons_lock, flags); + queue->rx.rsp_cons = val; + queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); +} + static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, grant_ref_t ref) { @@ -885,7 +903,7 @@ static int xennet_get_extras(struct netfront_queue *queue, xennet_move_rx_slot(queue, skb, ref); } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); - queue->rx.rsp_cons = cons; + xennet_set_rx_rsp_cons(queue, cons); return err; } @@ -930,7 +948,7 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act); } return act; @@ -1039,7 +1057,7 @@ next: } if (unlikely(err)) - queue->rx.rsp_cons = cons + slots; + xennet_set_rx_rsp_cons(queue, cons + slots); return err; } @@ -1093,7 +1111,8 @@ static int xennet_fill_frags(struct netfront_queue *queue, __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { - queue->rx.rsp_cons = ++cons + skb_queue_len(list); + xennet_set_rx_rsp_cons(queue, + ++cons + skb_queue_len(list)); kfree_skb(nskb); return -ENOENT; } @@ -1106,7 +1125,7 @@ static int xennet_fill_frags(struct netfront_queue *queue, kfree_skb(nskb); } - queue->rx.rsp_cons = cons; + xennet_set_rx_rsp_cons(queue, cons); return 0; } @@ -1229,7 +1248,9 @@ err: if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); - queue->rx.rsp_cons += skb_queue_len(&tmpq); + xennet_set_rx_rsp_cons(queue, + queue->rx.rsp_cons + + skb_queue_len(&tmpq)); goto err; } } @@ -1253,7 +1274,8 @@ err: __skb_queue_tail(&rxq, skb); - i = ++queue->rx.rsp_cons; + i = queue->rx.rsp_cons + 1; + xennet_set_rx_rsp_cons(queue, i); work_done++; } if (need_xdp_flush) @@ -1417,40 +1439,79 @@ static int xennet_set_features(struct net_device *dev, return 0; } -static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) +static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) { - struct netfront_queue *queue = dev_id; unsigned long flags; - if (queue->info->broken) - return IRQ_HANDLED; + if (unlikely(queue->info->broken)) + return 
false; spin_lock_irqsave(&queue->tx_lock, flags); - xennet_tx_buf_gc(queue); + if (xennet_tx_buf_gc(queue)) + *eoi = 0; spin_unlock_irqrestore(&queue->tx_lock, flags); + return true; +} + +static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) +{ + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + + if (likely(xennet_handle_tx(dev_id, &eoiflag))) + xen_irq_lateeoi(irq, eoiflag); + return IRQ_HANDLED; } -static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) +static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) { - struct netfront_queue *queue = dev_id; - struct net_device *dev = queue->info->netdev; + unsigned int work_queued; + unsigned long flags; - if (queue->info->broken) - return IRQ_HANDLED; + if (unlikely(queue->info->broken)) + return false; + + spin_lock_irqsave(&queue->rx_cons_lock, flags); + work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); + if (work_queued > queue->rx_rsp_unconsumed) { + queue->rx_rsp_unconsumed = work_queued; + *eoi = 0; + } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { + const struct device *dev = &queue->info->netdev->dev; + + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); + dev_alert(dev, "RX producer index going backwards\n"); + dev_alert(dev, "Disabled for further use\n"); + queue->info->broken = true; + return false; + } + spin_unlock_irqrestore(&queue->rx_cons_lock, flags); - if (likely(netif_carrier_ok(dev) && - RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) + if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) napi_schedule(&queue->napi); + return true; +} + +static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) +{ + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + + if (likely(xennet_handle_rx(dev_id, &eoiflag))) + xen_irq_lateeoi(irq, eoiflag); + return IRQ_HANDLED; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { - xennet_tx_interrupt(irq, dev_id); - xennet_rx_interrupt(irq, dev_id); + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + + if (xennet_handle_tx(dev_id, &eoiflag) && + xennet_handle_rx(dev_id, &eoiflag)) + xen_irq_lateeoi(irq, eoiflag); + return IRQ_HANDLED; } @@ -1768,9 +1829,10 @@ static int setup_netfront_single(struct netfront_queue *queue) if (err < 0) goto fail; - err = bind_evtchn_to_irqhandler(queue->tx_evtchn, - xennet_interrupt, - 0, queue->info->netdev->name, queue); + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, + xennet_interrupt, 0, + queue->info->netdev->name, + queue); if (err < 0) goto bind_fail; queue->rx_evtchn = queue->tx_evtchn; @@ -1798,18 +1860,18 @@ static int setup_netfront_split(struct netfront_queue *queue) snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); - err = bind_evtchn_to_irqhandler(queue->tx_evtchn, - xennet_tx_interrupt, - 0, queue->tx_irq_name, queue); + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, + xennet_tx_interrupt, 0, + queue->tx_irq_name, queue); if (err < 0) goto bind_tx_fail; queue->tx_irq = err; snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); - err = bind_evtchn_to_irqhandler(queue->rx_evtchn, - xennet_rx_interrupt, - 0, queue->rx_irq_name, queue); + err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, + xennet_rx_interrupt, 0, + queue->rx_irq_name, queue); if (err < 0) goto bind_rx_fail; queue->rx_irq = err; @@ -1911,6 +1973,7 @@ static int xennet_init_queue(struct netfront_queue *queue) spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); + spin_lock_init(&queue->rx_cons_lock); 
timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
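
The qcom_bam_dmux.c TX path above defers DMA submission when the xmit handler runs while the device is runtime-suspended: bam_dmux_netdev_start_xmit() sets one bit per slot in tx_deferred_skb via atomic_long_fetch_or() and kicks tx_wakeup_work only when the mask was previously empty; the worker then resumes the device and drains the mask. The following is a minimal standalone sketch of that bitmask hand-off, not part of the patch: it uses plain C11 atomics in place of the kernel's atomic_long_t and workqueue, and the names (defer_slot, wakeup_worker, NUM_SLOTS) and the printf stand-ins for DMA submission are invented for illustration.

/* Userspace analogue of the tx_deferred_skb pattern; illustrative only. */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_SLOTS 32	/* stands in for BAM_DMUX_NUM_SKB */

static atomic_ulong deferred;	/* one bit per slot queued while suspended */

/* Hot path: mark a slot deferred. Returns 1 if the worker must be kicked,
 * i.e. the mask was previously empty — mirroring the driver's
 * atomic_long_fetch_or() + queue_pm_work() sequence. */
static int defer_slot(unsigned int slot)
{
	return atomic_fetch_or(&deferred, 1UL << slot) == 0;
}

/* Worker: atomically take ownership of all pending bits, then submit each
 * slot exactly once (the driver submits the DMA descriptors here). */
static void wakeup_worker(void)
{
	unsigned long pending = atomic_exchange(&deferred, 0);

	for (unsigned int i = 0; i < NUM_SLOTS; i++)
		if (pending & (1UL << i))
			printf("submitting deferred slot %u\n", i);
}

int main(void)
{
	if (defer_slot(3))
		printf("kick worker\n");
	defer_slot(7);	/* mask already non-empty: no second kick needed */
	wakeup_worker();
	return 0;
}

In the driver itself the "kick" is queue_pm_work() and the drain runs only after pm_runtime_resume_and_get() succeeds, so deferred submissions never race with the power vote; the single atomic exchange guarantees each deferred skb is submitted exactly once even if more slots are deferred concurrently.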